diff --git a/.ci/bwcVersions b/.ci/bwcVersions index e51e25df6c4..1a71f98c0ef 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -43,4 +43,5 @@ BWC_VERSION: - "7.3.0" - "7.3.1" - "7.3.2" + - "7.3.3" - "7.4.0" diff --git a/.ci/init.gradle b/.ci/init.gradle index e5c71eb5881..e59541fe6ed 100644 --- a/.ci/init.gradle +++ b/.ci/init.gradle @@ -86,6 +86,10 @@ if (buildCacheUrl) { .getData(); gradle.settingsEvaluated { settings -> settings.buildCache { + local { + // Disable the local build cache in CI since we use ephemeral workers and it incurs an IO penalty + enabled = false + } remote(HttpBuildCache) { url = buildCacheUrl push = buildCachePush diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index 8bdfd215543..5b56241cc57 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -105,6 +105,7 @@ dependencies { compile localGroovy() compile 'commons-codec:commons-codec:1.12' + compile 'org.apache.commons:commons-compress:1.19' compile 'com.netflix.nebula:gradle-extra-configurations-plugin:3.0.3' compile 'com.netflix.nebula:nebula-publishing-plugin:4.4.4' diff --git a/buildSrc/settings.gradle b/buildSrc/settings.gradle index bb1046b67dc..dfe4e551b3b 100644 --- a/buildSrc/settings.gradle +++ b/buildSrc/settings.gradle @@ -1 +1,2 @@ include 'reaper' +include 'symbolic-link-preserving-tar' diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy index 53a022a5905..ef5392156f3 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy @@ -21,19 +21,21 @@ package org.elasticsearch.gradle.doc import org.elasticsearch.gradle.OS import org.elasticsearch.gradle.Version import org.elasticsearch.gradle.VersionProperties -import org.elasticsearch.gradle.test.RestTestPlugin +import org.gradle.api.Plugin import org.gradle.api.Project import org.gradle.api.Task + /** * Sets up tests for documentation. 
*/ -public class DocsTestPlugin extends RestTestPlugin { +class DocsTestPlugin implements Plugin { @Override - public void apply(Project project) { + void apply(Project project) { project.pluginManager.apply('elasticsearch.testclusters') project.pluginManager.apply('elasticsearch.standalone-rest-test') - super.apply(project) + project.pluginManager.apply('elasticsearch.rest-test') + String distribution = System.getProperty('tests.distribution', 'default') // The distribution can be configured with -Dtests.distribution on the command line project.testClusters.integTest.testDistribution = distribution.toUpperCase() diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy index d6e131170f3..4327cbea767 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy @@ -687,9 +687,7 @@ class ClusterFormationTasks { static Task configureExecTask(String name, Project project, Task setup, NodeInfo node, Object[] execArgs) { return project.tasks.create(name: name, type: LoggedExec, dependsOn: setup) { Exec exec -> exec.workingDir node.cwd - if ((project.isRuntimeJavaHomeSet && node.isBwcNode == false) // runtime Java might not be compatible with old nodes - || node.nodeVersion.before(Version.fromString("7.0.0")) - || node.config.distribution == 'integ-test-zip') { + if (useRuntimeJava(project, node)) { exec.environment.put('JAVA_HOME', project.runtimeJavaHome) } else { // force JAVA_HOME to *not* be set @@ -707,6 +705,12 @@ class ClusterFormationTasks { } } + public static boolean useRuntimeJava(Project project, NodeInfo node) { + return (project.isRuntimeJavaHomeSet || + (node.isBwcNode == false && node.nodeVersion.before(Version.fromString("7.0.0"))) || + node.config.distribution == 'integ-test-zip') + } + /** Adds a task to start an elasticsearch node with the given configuration */ static Task configureStartTask(String name, Project project, Task setup, NodeInfo node) { // this closure is converted into ant nodes by groovy's AntBuilder @@ -714,9 +718,7 @@ class ClusterFormationTasks { ant.exec(executable: node.executable, spawn: node.config.daemonize, newenvironment: true, dir: node.cwd, taskname: 'elasticsearch') { node.env.each { key, value -> env(key: key, value: value) } - if ((project.isRuntimeJavaHomeSet && node.isBwcNode == false) // runtime Java might not be compatible with old nodes - || node.nodeVersion.before(Version.fromString("7.0.0")) - || node.config.distribution == 'integ-test-zip') { + if (useRuntimeJava(project, node)) { env(key: 'JAVA_HOME', value: project.runtimeJavaHome) } node.args.each { arg(value: it) } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/DistroTestPlugin.java b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/DistroTestPlugin.java index 799283ab779..8dd757d811e 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/DistroTestPlugin.java +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/DistroTestPlugin.java @@ -66,6 +66,7 @@ import static org.elasticsearch.gradle.vagrant.VagrantMachine.convertWindowsPath public class DistroTestPlugin implements Plugin { private static final String GRADLE_JDK_VERSION = "12.0.1+12@69cfe15208a647278a19ef0990eea691"; + private static final String GRADLE_JDK_VENDOR = "openjdk"; // all distributions used by distro tests. 
this is temporary until tests are per distribution private static final String DISTRIBUTIONS_CONFIGURATION = "distributions"; @@ -138,8 +139,10 @@ public class DistroTestPlugin implements Plugin { }); } - private static Jdk createJdk(NamedDomainObjectContainer jdksContainer, String name, String version, String platform) { + private static Jdk createJdk( + NamedDomainObjectContainer jdksContainer, String name, String vendor, String version, String platform) { Jdk jdk = jdksContainer.create(name); + jdk.setVendor(vendor); jdk.setVersion(version); jdk.setPlatform(platform); return jdk; @@ -171,10 +174,10 @@ public class DistroTestPlugin implements Plugin { String box = project.getName(); // setup jdks used by the distro tests, and by gradle executing - + NamedDomainObjectContainer jdksContainer = JdkDownloadPlugin.getContainer(project); String platform = box.contains("windows") ? "windows" : "linux"; - Jdk gradleJdk = createJdk(jdksContainer, "gradle", GRADLE_JDK_VERSION, platform); + Jdk gradleJdk = createJdk(jdksContainer, "gradle", GRADLE_JDK_VENDOR, GRADLE_JDK_VERSION, platform); // setup VM used by these tests VagrantExtension vagrant = project.getExtensions().getByType(VagrantExtension.class); @@ -311,7 +314,7 @@ public class DistroTestPlugin implements Plugin { } }); } - + private List configureDistributions(Project project, Version upgradeVersion) { NamedDomainObjectContainer distributions = DistributionDownloadPlugin.getContainer(project); List currentDistros = new ArrayList<>(); diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy index ca531aee8d1..d0b833cc884 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy @@ -22,10 +22,12 @@ import org.elasticsearch.gradle.VersionProperties import org.elasticsearch.gradle.testclusters.ElasticsearchCluster import org.elasticsearch.gradle.testclusters.RestTestRunnerTask import org.elasticsearch.gradle.testclusters.TestClustersPlugin +import org.elasticsearch.gradle.tool.Boilerplate import org.elasticsearch.gradle.tool.ClasspathUtils import org.gradle.api.DefaultTask import org.gradle.api.Task import org.gradle.api.execution.TaskExecutionAdapter +import org.gradle.api.file.FileCopyDetails import org.gradle.api.logging.Logger import org.gradle.api.logging.Logging import org.gradle.api.tasks.Copy @@ -39,6 +41,7 @@ import org.gradle.process.CommandLineArgumentProvider import java.nio.charset.StandardCharsets import java.nio.file.Files import java.util.stream.Stream + /** * A wrapper task around setting up a cluster and running rest tests. */ @@ -121,10 +124,10 @@ class RestIntegTestTask extends DefaultTask { runner.systemProperty('test.cluster', System.getProperty("tests.cluster")) } - // copy the rest spec/tests into the test resources - Task copyRestSpec = createCopyRestSpecTask() - runner.dependsOn(copyRestSpec) - + // copy the rest spec/tests onto the test classpath + Copy copyRestSpec = createCopyRestSpecTask() + project.sourceSets.test.output.builtBy(copyRestSpec) + // this must run after all projects have been configured, so we know any project // references can be accessed as a fully configured project.gradle.projectsEvaluated { @@ -222,50 +225,37 @@ class RestIntegTestTask extends DefaultTask { } - /** - * Creates a task (if necessary) to copy the rest spec files. 
- * - * @param project The project to add the copy task to - * @param includePackagedTests true if the packaged tests should be copied, false otherwise - */ - Task createCopyRestSpecTask() { - project.configurations { - restSpec + Copy createCopyRestSpecTask() { + Boilerplate.maybeCreate(project.configurations, 'restSpec') { + project.dependencies.add( + 'restSpec', + ClasspathUtils.isElasticsearchProject() ? project.project(':rest-api-spec') : + "org.elasticsearch:rest-api-spec:${VersionProperties.elasticsearch}" + ) } - project.dependencies { - restSpec ClasspathUtils.isElasticsearchProject() ? project.project(':rest-api-spec') : - "org.elasticsearch:rest-api-spec:${VersionProperties.elasticsearch}" - } - Task copyRestSpec = project.tasks.findByName('copyRestSpec') - if (copyRestSpec != null) { - return copyRestSpec - } - Map copyRestSpecProps = [ - name : 'copyRestSpec', - type : Copy, - dependsOn: [project.configurations.restSpec, 'processTestResources'] - ] - copyRestSpec = project.tasks.create(copyRestSpecProps) { - into project.sourceSets.test.output.resourcesDir - } - project.afterEvaluate { - copyRestSpec.from({ project.zipTree(project.configurations.restSpec.singleFile) }) { - include 'rest-api-spec/api/**' - if (includePackaged) { - include 'rest-api-spec/test/**' + + return Boilerplate.maybeCreate(project.tasks, 'copyRestSpec', Copy) { Copy copy -> + copy.dependsOn project.configurations.restSpec + copy.into(project.sourceSets.test.output.resourcesDir) + copy.from({ project.zipTree(project.configurations.restSpec.singleFile) }) { + includeEmptyDirs = false + include 'rest-api-spec/**' + filesMatching('rest-api-spec/test/**') { FileCopyDetails details -> + if (includePackaged == false) { + details.exclude() + } } } - } - if (project.plugins.hasPlugin(IdeaPlugin)) { - project.idea { - module { - if (scopes.TEST != null) { - scopes.TEST.plus.add(project.configurations.restSpec) + + if (project.plugins.hasPlugin(IdeaPlugin)) { + project.idea { + module { + if (scopes.TEST != null) { + scopes.TEST.plus.add(project.configurations.restSpec) + } } } } } - return copyRestSpec } - } diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/EmptyDirTask.java b/buildSrc/src/main/java/org/elasticsearch/gradle/EmptyDirTask.java index ec9b774767c..4ee2192ff34 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/EmptyDirTask.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/EmptyDirTask.java @@ -25,7 +25,7 @@ import javax.inject.Inject; import org.gradle.api.DefaultTask; import org.gradle.api.tasks.Input; import org.gradle.api.tasks.TaskAction; -import org.gradle.internal.nativeintegration.filesystem.Chmod; +import org.gradle.internal.file.Chmod; /** * Creates an empty directory. 
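Note on the createCopyRestSpecTask rewrite in RestIntegTestTask.groovy above: the REST spec now reaches the test classpath because the copy task is registered as a producer of the test source set output (project.sourceSets.test.output.builtBy(copyRestSpec)), and packaged tests are filtered with filesMatching rather than a conditional include. A minimal standalone sketch of that filesMatching/exclude idiom in Gradle's Groovy DSL follows; it assumes a project that applies the java plugin and defines a restSpec configuration as above, and the task name copyRestSpecExample plus the includePackagedTests property are illustrative stand-ins, not part of this change.

task copyRestSpecExample(type: Copy) {
    dependsOn configurations.restSpec
    // land the files where processTestResources puts test resources
    into sourceSets.test.output.resourcesDir
    from({ zipTree(configurations.restSpec.singleFile) }) {
        includeEmptyDirs = false
        include 'rest-api-spec/**'
        filesMatching('rest-api-spec/test/**') { details ->
            // drop the packaged tests unless they are explicitly requested
            if (findProperty('includePackagedTests') != 'true') {
                details.exclude()
            }
        }
    }
}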
diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/Jdk.java b/buildSrc/src/main/java/org/elasticsearch/gradle/Jdk.java index 91516e26af9..7fe5e93f5f3 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/Jdk.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/Jdk.java @@ -34,19 +34,22 @@ import java.util.regex.Pattern; public class Jdk implements Buildable, Iterable<File> { - static final Pattern VERSION_PATTERN = Pattern.compile("(\\d+)(\\.\\d+\\.\\d+)?\\+(\\d+)(@([a-f0-9]{32}))?"); - private static final List<String> ALLOWED_PLATFORMS = Collections.unmodifiableList(Arrays.asList("linux", "windows", "darwin")); + private static final List<String> ALLOWED_VENDORS = Collections.unmodifiableList(Arrays.asList("adoptopenjdk", "openjdk")); + static final Pattern VERSION_PATTERN = + Pattern.compile("(\\d+)(\\.\\d+\\.\\d+)?\\+(\\d+(?:\\.\\d+)?)(@([a-f0-9]{32}))?"); + private static final List<String> ALLOWED_PLATFORMS = Collections.unmodifiableList(Arrays.asList("darwin", "linux", "windows")); private final String name; private final Configuration configuration; + private final Property<String> vendor; private final Property<String> version; private final Property<String> platform; - Jdk(String name, Project project) { this.name = name; this.configuration = project.getConfigurations().create("jdk_" + name); + this.vendor = project.getObjects().property(String.class); this.version = project.getObjects().property(String.class); this.platform = project.getObjects().property(String.class); } @@ -55,6 +58,17 @@ public class Jdk implements Buildable, Iterable<File> { return name; } + public String getVendor() { + return vendor.get(); + } + + public void setVendor(final String vendor) { + if (ALLOWED_VENDORS.contains(vendor) == false) { + throw new IllegalArgumentException("unknown vendor [" + vendor + "] for jdk [" + name + "], must be one of " + ALLOWED_VENDORS); + } + this.vendor.set(vendor); + } + public String getVersion() { return version.get(); } @@ -105,12 +119,17 @@ public class Jdk implements Buildable, Iterable<File> { if (platform.isPresent() == false) { throw new IllegalArgumentException("platform not specified for jdk [" + name + "]"); } + if (vendor.isPresent() == false) { + throw new IllegalArgumentException("vendor not specified for jdk [" + name + "]"); + } version.finalizeValue(); platform.finalizeValue(); + vendor.finalizeValue(); } @Override public Iterator<File> iterator() { return configuration.iterator(); } + } diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/JdkDownloadPlugin.java b/buildSrc/src/main/java/org/elasticsearch/gradle/JdkDownloadPlugin.java index 7c57af701fe..9dd4f35333f 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/JdkDownloadPlugin.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/JdkDownloadPlugin.java @@ -19,6 +19,7 @@ package org.elasticsearch.gradle; +import org.elasticsearch.gradle.tar.SymbolicLinkPreservingUntarTask; import org.gradle.api.Action; import org.gradle.api.NamedDomainObjectContainer; import org.gradle.api.Plugin; @@ -31,12 +32,15 @@ import org.gradle.api.artifacts.dsl.DependencyHandler; import org.gradle.api.artifacts.dsl.RepositoryHandler; import org.gradle.api.artifacts.repositories.IvyArtifactRepository; import org.gradle.api.file.CopySpec; +import org.gradle.api.file.Directory; import org.gradle.api.file.FileTree; import org.gradle.api.file.RelativePath; +import org.gradle.api.provider.Provider; import org.gradle.api.tasks.Copy; -import org.gradle.api.tasks.TaskProvider; import java.io.File; +import java.nio.file.Path; +import java.nio.file.Paths;
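The vendor property added to Jdk above surfaces in the build-script DSL that JdkDownloadPlugin registers under the jdks container (the same container the unit and integration tests further below configure). A hedged sketch of declaring one entry in Groovy; the entry name bundledRuntime is illustrative only, and the version shown is simply the AdoptOpenJDK build used elsewhere in this change rather than a required value.

jdks {
    bundledRuntime {
        vendor = 'adoptopenjdk'   // validated against ALLOWED_VENDORS
        version = '12.0.2+10'     // must match VERSION_PATTERN; only openjdk versions carry the @hash suffix
        platform = 'linux'        // validated against ALLOWED_PLATFORMS
    }
}

With vendor, version, and platform set, the plugin wires up the vendor-specific Ivy repository, the <vendor>_<version>_<platform> download configuration, and the extraction task whose output backs the corresponding extracted_jdk configuration.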
import java.util.Arrays; import java.util.HashMap; import java.util.Locale; @@ -60,6 +64,7 @@ public class JdkDownloadPlugin implements Plugin { project.afterEvaluate(p -> { for (Jdk jdk : jdksContainer) { jdk.finalizeValues(); + String vendor = jdk.getVendor(); String version = jdk.getVersion(); String platform = jdk.getPlatform(); @@ -67,18 +72,21 @@ public class JdkDownloadPlugin implements Plugin { DependencyHandler dependencies = project.getDependencies(); Map depConfig = new HashMap<>(); depConfig.put("path", ":"); // root project - depConfig.put("configuration", configName("extracted_jdk", version, platform)); + depConfig.put("configuration", configName("extracted_jdk", vendor, version, platform)); dependencies.add(jdk.getConfiguration().getName(), dependencies.project(depConfig)); // ensure a root level jdk download task exists - setupRootJdkDownload(project.getRootProject(), platform, version); + setupRootJdkDownload(project.getRootProject(), platform, vendor, version); } }); // all other repos should ignore the special jdk artifacts project.getRootProject().getRepositories().all(repo -> { if (repo.getName().startsWith(REPO_NAME_PREFIX) == false) { - repo.content(content -> content.excludeGroup("jdk")); + repo.content(content -> { + content.excludeGroup("adoptopenjdk"); + content.excludeGroup("openjdk"); + }); } }); } @@ -88,8 +96,8 @@ public class JdkDownloadPlugin implements Plugin { return (NamedDomainObjectContainer) project.getExtensions().getByName(CONTAINER_NAME); } - private static void setupRootJdkDownload(Project rootProject, String platform, String version) { - String extractTaskName = "extract" + capitalize(platform) + "Jdk" + version; + private static void setupRootJdkDownload(Project rootProject, String platform, String vendor, String version) { + String extractTaskName = "extract" + capitalize(platform) + "Jdk-" + vendor + "-" + version; // NOTE: this is *horrendous*, but seems to be the only way to check for the existence of a registered task try { rootProject.getTasks().named(extractTaskName); @@ -111,83 +119,162 @@ public class JdkDownloadPlugin implements Plugin { String hash = jdkVersionMatcher.group(5); // add fake ivy repo for jdk url - String repoName = REPO_NAME_PREFIX + version; + String repoName = REPO_NAME_PREFIX + vendor + "_" + version; RepositoryHandler repositories = rootProject.getRepositories(); if (rootProject.getRepositories().findByName(repoName) == null) { - if (hash != null) { - // current pattern since 12.0.1 + if (vendor.equals("adoptopenjdk")) { + if (hash != null) { + throw new IllegalArgumentException("adoptopenjdk versions do not have hashes but was [" + version + "]"); + } repositories.ivy(ivyRepo -> { ivyRepo.setName(repoName); - ivyRepo.setUrl("https://download.oracle.com"); + ivyRepo.setUrl("https://artifactory.elstc.co/artifactory/oss-jdk-local/"); ivyRepo.metadataSources(IvyArtifactRepository.MetadataSources::artifact); - ivyRepo.patternLayout(layout -> layout.artifact( - "java/GA/jdk" + jdkVersion + "/" + hash + "/" + jdkBuild + "/GPL/openjdk-[revision]_[module]-x64_bin.[ext]")); - ivyRepo.content(content -> content.includeGroup("jdk")); + final String pattern = String.format( + Locale.ROOT, + "adoptopenjdk/OpenJDK%sU-jdk_x64_[module]_hotspot_[revision]_%s.[ext]", + jdkMajor, + jdkBuild); + ivyRepo.patternLayout(layout -> layout.artifact(pattern)); + ivyRepo.content(content -> content.includeGroup("adoptopenjdk")); }); } else { - // simpler legacy pattern from JDK 9 to JDK 12 that we are advocating to Oracle to bring back - 
repositories.ivy(ivyRepo -> { - ivyRepo.setName(repoName); - ivyRepo.setUrl("https://download.oracle.com"); - ivyRepo.metadataSources(IvyArtifactRepository.MetadataSources::artifact); - ivyRepo.patternLayout(layout -> - layout.artifact("java/GA/jdk" + jdkMajor + "/" + jdkBuild + "/GPL/openjdk-[revision]_[module]-x64_bin.[ext]")); - ivyRepo.content(content -> content.includeGroup("jdk")); - }); + assert vendor.equals("openjdk") : vendor; + if (hash != null) { + // current pattern since 12.0.1 + repositories.ivy(ivyRepo -> { + ivyRepo.setName(repoName); + ivyRepo.setUrl("https://download.oracle.com"); + ivyRepo.metadataSources(IvyArtifactRepository.MetadataSources::artifact); + ivyRepo.patternLayout(layout -> layout.artifact( + "java/GA/jdk" + jdkVersion + "/" + hash + "/" + jdkBuild + "/GPL/openjdk-[revision]_[module]-x64_bin.[ext]")); + ivyRepo.content(content -> content.includeGroup("openjdk")); + }); + } else { + // simpler legacy pattern from JDK 9 to JDK 12 that we are advocating to Oracle to bring back + repositories.ivy(ivyRepo -> { + ivyRepo.setName(repoName); + ivyRepo.setUrl("https://download.oracle.com"); + ivyRepo.metadataSources(IvyArtifactRepository.MetadataSources::artifact); + ivyRepo.patternLayout(layout -> + layout.artifact("java/GA/jdk" + jdkMajor + "/" + jdkBuild + "/GPL/openjdk-[revision]_[module]-x64_bin.[ext]")); + ivyRepo.content(content -> content.includeGroup("openjdk")); + }); + } } } // add the jdk as a "dependency" final ConfigurationContainer configurations = rootProject.getConfigurations(); - String remoteConfigName = configName("openjdk", version, platform); - String localConfigName = configName("extracted_jdk", version, platform); + String remoteConfigName = configName(vendor, version, platform); + String localConfigName = configName("extracted_jdk", vendor, version, platform); Configuration jdkConfig = configurations.findByName(remoteConfigName); if (jdkConfig == null) { jdkConfig = configurations.create(remoteConfigName); configurations.create(localConfigName); } + String platformDep = platform.equals("darwin") ? (vendor.equals("adoptopenjdk") ? "mac" : "osx") : platform; String extension = platform.equals("windows") ? "zip" : "tar.gz"; - String jdkDep = "jdk:" + (platform.equals("darwin") ? "osx" : platform) + ":" + jdkVersion + "@" + extension; - rootProject.getDependencies().add(configName("openjdk", version, platform), jdkDep); + String jdkDep = vendor + ":" + platformDep + ":" + jdkVersion + "@" + extension; + rootProject.getDependencies().add(configName(vendor, version, platform), jdkDep); // add task for extraction - // TODO: look into doing this as an artifact transform, which are cacheable starting in gradle 5.3 - int rootNdx = platform.equals("darwin") ? 
2 : 1; - Action removeRootDir = copy -> { - // remove extra unnecessary directory levels - copy.eachFile(details -> { - String[] pathSegments = details.getRelativePath().getSegments(); - String[] newPathSegments = Arrays.copyOfRange(pathSegments, rootNdx, pathSegments.length); - details.setRelativePath(new RelativePath(true, newPathSegments)); - }); - copy.setIncludeEmptyDirs(false); - }; + final Provider extractPath = + rootProject.getLayout().getBuildDirectory().dir("jdks/" + vendor + "-" + jdkVersion + "_" + platform); + // delay resolving jdkConfig until runtime Supplier jdkArchiveGetter = jdkConfig::getSingleFile; - final Callable fileGetter; + final Object extractTask; if (extension.equals("zip")) { - fileGetter = () -> rootProject.zipTree(jdkArchiveGetter.get()); - } else { - fileGetter = () -> rootProject.tarTree(rootProject.getResources().gzip(jdkArchiveGetter.get())); - } - String extractDir = rootProject.getBuildDir().toPath().resolve("jdks/openjdk-" + jdkVersion + "_" + platform).toString(); - TaskProvider extractTask = rootProject.getTasks().register(extractTaskName, Copy.class, copyTask -> { - copyTask.doFirst(new Action() { - @Override - public void execute(Task t) { - rootProject.delete(extractDir); - } + final Callable fileGetter = () -> rootProject.zipTree(jdkArchiveGetter.get()); + // TODO: look into doing this as an artifact transform, which are cacheable starting in gradle 5.3 + Action removeRootDir = copy -> { + // remove extra unnecessary directory levels + copy.eachFile(details -> { + /* + * We want to remove up to and including the jdk-.* relative paths. That is, a JDK archive is structured as: + * jdk-12.0.1/ + * jdk-12.0.1/Contents + * ... + * + * and we want to remove the leading jdk-12.0.1. Note however that there could also be a leading ./ as in + * ./ + * ./jdk-12.0.1/ + * ./jdk-12.0.1/Contents + * + * so we account for this and search the path components until we find the jdk-12.0.1, and strip the leading components. + */ + String[] pathSegments = details.getRelativePath().getSegments(); + int index = 0; + for (; index < pathSegments.length; index++) { + if (pathSegments[index].matches("jdk-.*")) break; + } + assert index + 1 <= pathSegments.length; + String[] newPathSegments = Arrays.copyOfRange(pathSegments, index + 1, pathSegments.length); + details.setRelativePath(new RelativePath(true, newPathSegments)); + }); + copy.setIncludeEmptyDirs(false); + }; + extractTask = rootProject.getTasks().register(extractTaskName, Copy.class, copyTask -> { + copyTask.doFirst(new Action() { + @Override + public void execute(Task t) { + rootProject.delete(extractPath); + } + }); + copyTask.into(extractPath); + copyTask.from(fileGetter, removeRootDir); }); - copyTask.into(extractDir); - copyTask.from(fileGetter, removeRootDir); - }); + } else { + /* + * Gradle TarFileTree does not resolve symlinks, so we have to manually extract and preserve the symlinks. + * cf. https://github.com/gradle/gradle/issues/3982 and https://discuss.gradle.org/t/tar-and-untar-losing-symbolic-links/2039 + */ + final Configuration jdkConfiguration = jdkConfig; + extractTask = rootProject.getTasks().register(extractTaskName, SymbolicLinkPreservingUntarTask.class, task -> { + task.getTarFile().set(jdkConfiguration.getSingleFile()); + task.getExtractPath().set(extractPath); + task.setTransform( + name -> { + /* + * We want to remove up to and including the jdk-.* relative paths. That is, a JDK archive is structured as: + * jdk-12.0.1/ + * jdk-12.0.1/Contents + * ...
+ * + * and we want to remove the leading jdk-12.0.1. Note however that there could also be a leading ./ as in + * ./ + * ./jdk-12.0.1/ + * ./jdk-12.0.1/Contents + * + * so we account for this and search the path components until we find the jdk-12.0.1, and strip the leading + * components. + */ + final Path entryName = Paths.get(name); + int index = 0; + for (; index < entryName.getNameCount(); index++) { + if (entryName.getName(index).toString().matches("jdk-.*")) break; + } + if (index + 1 >= entryName.getNameCount()) { + // this happens on the top-level directories in the archive, which we are removing + return null; + } + // finally remove the top-level directories from the output path + return entryName.subpath(index + 1, entryName.getNameCount()); + }); + }); + } rootProject.getArtifacts().add(localConfigName, - rootProject.getLayout().getProjectDirectory().dir(extractDir), + extractPath, artifact -> artifact.builtBy(extractTask)); } - private static String configName(String prefix, String version, String platform) { - return prefix + "_" + version + "_" + platform; + private static String configName(String vendor, String version, String platform) { + return vendor + "_" + version + "_" + platform; + } + + private static String configName(String prefix, String vendor, String version, String platform) { + return prefix + "_" + vendor + "_" + version + "_" + platform; } private static String capitalize(String s) { diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/info/GenerateGlobalBuildInfoTask.java b/buildSrc/src/main/java/org/elasticsearch/gradle/info/GenerateGlobalBuildInfoTask.java index 3d247d2dd3e..cbe61a54ae9 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/info/GenerateGlobalBuildInfoTask.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/info/GenerateGlobalBuildInfoTask.java @@ -120,9 +120,9 @@ public class GenerateGlobalBuildInfoTask extends DefaultTask { @TaskAction public void generate() { - String javaVendor = System.getProperty("java.vendor"); + String javaVendorVersion = System.getProperty("java.vendor.version", System.getProperty("java.vendor")); String gradleJavaVersion = System.getProperty("java.version"); - String gradleJavaVersionDetails = javaVendor + " " + gradleJavaVersion + " [" + System.getProperty("java.vm.name") + String gradleJavaVersionDetails = javaVendorVersion + " " + gradleJavaVersion + " [" + System.getProperty("java.vm.name") + " " + System.getProperty("java.vm.version") + "]"; String compilerJavaVersionDetails = gradleJavaVersionDetails; @@ -231,8 +231,10 @@ public class GenerateGlobalBuildInfoTask extends DefaultTask { */ private String findJavaVersionDetails(File javaHome) { String versionInfoScript = "print(" + - "java.lang.System.getProperty(\"java.vendor\") + \" \" + java.lang.System.getProperty(\"java.version\") + " + - "\" [\" + java.lang.System.getProperty(\"java.vm.name\") + \" \" + java.lang.System.getProperty(\"java.vm.version\") + \"]\");"; + "java.lang.System.getProperty(\"java.vendor.version\", java.lang.System.getProperty(\"java.vendor\")) + \" \" + " + + "java.lang.System.getProperty(\"java.version\") + \" [\" + " + + "java.lang.System.getProperty(\"java.vm.name\") + \" \" + " + + "java.lang.System.getProperty(\"java.vm.version\") + \"]\");"; return runJavaAsScript(javaHome, versionInfoScript).trim(); } diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/info/GlobalBuildInfoPlugin.java b/buildSrc/src/main/java/org/elasticsearch/gradle/info/GlobalBuildInfoPlugin.java index 
d97f5fb2cd3..b59d9806a0b 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/info/GlobalBuildInfoPlugin.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/info/GlobalBuildInfoPlugin.java @@ -95,6 +95,7 @@ public class GlobalBuildInfoPlugin implements Plugin { ext.set("gradleJavaVersion", Jvm.current().getJavaVersion()); ext.set("gitRevision", gitRevision(project.getRootProject().getRootDir())); ext.set("buildDate", ZonedDateTime.now(ZoneOffset.UTC)); + ext.set("isCi", System.getenv("JENKINS_URL") != null); }); } diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/tar/SymbolicLinkPreservingTar.java b/buildSrc/src/main/java/org/elasticsearch/gradle/tar/SymbolicLinkPreservingTar.java new file mode 100644 index 00000000000..f76289dc591 --- /dev/null +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/tar/SymbolicLinkPreservingTar.java @@ -0,0 +1,211 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.gradle.tar; + +import org.apache.commons.compress.archivers.tar.TarArchiveEntry; +import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream; +import org.apache.commons.compress.archivers.tar.TarConstants; +import org.apache.commons.compress.archivers.zip.UnixStat; +import org.gradle.api.GradleException; +import org.gradle.api.file.FileCopyDetails; +import org.gradle.api.file.RegularFile; +import org.gradle.api.internal.file.CopyActionProcessingStreamAction; +import org.gradle.api.internal.file.archive.compression.ArchiveOutputStreamFactory; +import org.gradle.api.internal.file.archive.compression.Bzip2Archiver; +import org.gradle.api.internal.file.archive.compression.GzipArchiver; +import org.gradle.api.internal.file.archive.compression.SimpleCompressor; +import org.gradle.api.internal.file.copy.CopyAction; +import org.gradle.api.internal.file.copy.CopyActionProcessingStream; +import org.gradle.api.internal.file.copy.FileCopyDetailsInternal; +import org.gradle.api.provider.Provider; +import org.gradle.api.tasks.WorkResult; +import org.gradle.api.tasks.WorkResults; +import org.gradle.api.tasks.bundling.Tar; + +import java.io.File; +import java.io.IOException; +import java.io.OutputStream; +import java.nio.file.Files; +import java.util.HashSet; +import java.util.Set; + +/** + * A custom archive task that assembles a tar archive that preserves symbolic links. + * + * This task is necessary because the built-in task {@link org.gradle.api.tasks.bundling.Tar} does not preserve symbolic links. 
+ */ +public class SymbolicLinkPreservingTar extends Tar { + + @Override + protected CopyAction createCopyAction() { + final ArchiveOutputStreamFactory compressor; + switch (getCompression()) { + case BZIP2: + compressor = Bzip2Archiver.getCompressor(); + break; + case GZIP: + compressor = GzipArchiver.getCompressor(); + break; + default: + compressor = new SimpleCompressor(); + break; + } + return new SymbolicLinkPreservingTarCopyAction(getArchiveFile(), compressor, isPreserveFileTimestamps()); + } + + private static class SymbolicLinkPreservingTarCopyAction implements CopyAction { + + private final Provider tarFile; + private final ArchiveOutputStreamFactory compressor; + private final boolean isPreserveFileTimestamps; + + SymbolicLinkPreservingTarCopyAction( + final Provider tarFile, + final ArchiveOutputStreamFactory compressor, + final boolean isPreserveFileTimestamps) { + this.tarFile = tarFile; + this.compressor = compressor; + this.isPreserveFileTimestamps = isPreserveFileTimestamps; + } + + @Override + public WorkResult execute(final CopyActionProcessingStream stream) { + try (OutputStream out = compressor.createArchiveOutputStream(tarFile.get().getAsFile()); + TarArchiveOutputStream tar = new TarArchiveOutputStream(out)) { + tar.setLongFileMode(TarArchiveOutputStream.LONGFILE_GNU); + stream.process(new SymbolicLinkPreservingTarStreamAction(tar)); + } catch (final IOException e) { + throw new GradleException("failed writing tar file [" + tarFile + "]", e); + } + + return WorkResults.didWork(true); + } + + private class SymbolicLinkPreservingTarStreamAction implements CopyActionProcessingStreamAction { + + private final TarArchiveOutputStream tar; + + /* + * When Gradle walks the file tree, it will follow symbolic links. This means that if there is a symbolic link to a directory + * in the source file tree, we could otherwise end up duplicating the entries below that directory in the resulting tar archive. + * To avoid this, we track which symbolic links we have visited, and skip files that are children of symbolic links that we have + * already visited. 
+ */ + private final Set visitedSymbolicLinks = new HashSet<>(); + + SymbolicLinkPreservingTarStreamAction(final TarArchiveOutputStream tar) { + this.tar = tar; + } + + @Override + public void processFile(final FileCopyDetailsInternal details) { + if (isChildOfVisitedSymbolicLink(details) == false) { + if (isSymbolicLink(details)) { + visitSymbolicLink(details); + } else if (details.isDirectory()) { + visitDirectory(details); + } else { + visitFile(details); + } + } + } + + private boolean isChildOfVisitedSymbolicLink(final FileCopyDetailsInternal details) { + final File file; + try { + file = details.getFile(); + } catch (final UnsupportedOperationException e) { + // we get invoked with stubbed details, there is no way to introspect this other than catching this exception + return false; + } + for (final File symbolicLink : visitedSymbolicLinks) { + if (isChildOf(symbolicLink, file)) return true; + } + return false; + } + + private boolean isChildOf(final File directory, final File file) { + return file.toPath().startsWith(directory.toPath()); + } + + private boolean isSymbolicLink(final FileCopyDetailsInternal details) { + final File file; + try { + file = details.getFile(); + } catch (final UnsupportedOperationException e) { + // we get invoked with stubbed details, there is no way to introspect this other than catching this exception + return false; + } + return Files.isSymbolicLink(file.toPath()); + } + + private void visitSymbolicLink(final FileCopyDetailsInternal details) { + visitedSymbolicLinks.add(details.getFile()); + final TarArchiveEntry entry = new TarArchiveEntry(details.getRelativePath().getPathString(), TarConstants.LF_SYMLINK); + entry.setModTime(getModTime(details)); + entry.setMode(UnixStat.LINK_FLAG | details.getMode()); + try { + entry.setLinkName(Files.readSymbolicLink(details.getFile().toPath()).toString()); + tar.putArchiveEntry(entry); + tar.closeArchiveEntry(); + } catch (final IOException e) { + handleProcessingException(details, e); + } + } + + private void visitDirectory(final FileCopyDetailsInternal details) { + final TarArchiveEntry entry = new TarArchiveEntry(details.getRelativePath().getPathString() + "/"); + entry.setModTime(getModTime(details)); + entry.setMode(UnixStat.DIR_FLAG | details.getMode()); + try { + tar.putArchiveEntry(entry); + tar.closeArchiveEntry(); + } catch (final IOException e) { + handleProcessingException(details, e); + } + } + + private void visitFile(final FileCopyDetailsInternal details) { + final TarArchiveEntry entry = new TarArchiveEntry(details.getRelativePath().getPathString()); + entry.setModTime(getModTime(details)); + entry.setMode(UnixStat.FILE_FLAG | details.getMode()); + entry.setSize(details.getSize()); + try { + tar.putArchiveEntry(entry); + details.copyTo(tar); + tar.closeArchiveEntry(); + } catch (final IOException e) { + handleProcessingException(details, e); + } + } + + private void handleProcessingException(final FileCopyDetailsInternal details, final IOException e) { + throw new GradleException("could not add [" + details + "] to tar file [" + tarFile + "]", e); + } + + } + + private long getModTime(final FileCopyDetails details) { + return isPreserveFileTimestamps ? 
details.getLastModified() : 0; + } + + } + +} diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/tar/SymbolicLinkPreservingUntarTask.java b/buildSrc/src/main/java/org/elasticsearch/gradle/tar/SymbolicLinkPreservingUntarTask.java new file mode 100644 index 00000000000..183d7721b8a --- /dev/null +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/tar/SymbolicLinkPreservingUntarTask.java @@ -0,0 +1,162 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.gradle.tar; + +import org.apache.commons.compress.archivers.tar.TarArchiveEntry; +import org.apache.commons.compress.archivers.tar.TarArchiveInputStream; +import org.apache.commons.compress.compressors.gzip.GzipCompressorInputStream; +import org.gradle.api.DefaultTask; +import org.gradle.api.GradleException; +import org.gradle.api.file.DirectoryProperty; +import org.gradle.api.file.RegularFileProperty; +import org.gradle.api.model.ObjectFactory; +import org.gradle.api.tasks.Input; +import org.gradle.api.tasks.InputFile; +import org.gradle.api.tasks.OutputDirectory; +import org.gradle.api.tasks.TaskAction; + +import javax.inject.Inject; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.attribute.PosixFileAttributeView; +import java.nio.file.attribute.PosixFilePermission; +import java.nio.file.attribute.PosixFilePermissions; +import java.util.Set; +import java.util.function.Function; + +/** + * A custom task that explodes a tar archive that preserves symbolic links. + * + * This task is necessary because the built-in task {@link org.gradle.api.internal.file.archive.TarFileTree} does not preserve symbolic + * links. + */ +public class SymbolicLinkPreservingUntarTask extends DefaultTask { + + private final RegularFileProperty tarFile; + + @InputFile + public RegularFileProperty getTarFile() { + return tarFile; + } + + private final DirectoryProperty extractPath; + + @OutputDirectory + public DirectoryProperty getExtractPath() { + return extractPath; + } + + private Function transform; + + /** + * A transform to apply to the tar entry, to derive the relative path from the entry name. If the return value is null, the entry is + * dropped from the exploded tar archive. 
+ * + * @param transform the transform + */ + @Input + public void setTransform(Function transform) { + this.transform = transform; + } + + @Inject + public SymbolicLinkPreservingUntarTask(final ObjectFactory objectFactory) { + this.tarFile = objectFactory.fileProperty(); + this.extractPath = objectFactory.directoryProperty(); + this.transform = name -> Paths.get(name); + } + + @TaskAction + final void execute() { + // ensure the target extraction path is empty + getProject().delete(extractPath); + try (TarArchiveInputStream tar = + new TarArchiveInputStream(new GzipCompressorInputStream(new FileInputStream(tarFile.getAsFile().get())))) { + final Path destinationPath = extractPath.get().getAsFile().toPath(); + TarArchiveEntry entry = tar.getNextTarEntry(); + while (entry != null) { + final Path relativePath = transform.apply(entry.getName()); + if (relativePath == null) { + entry = tar.getNextTarEntry(); + continue; + } + + final Path destination = destinationPath.resolve(relativePath); + final Path parent = destination.getParent(); + if (Files.exists(parent) == false) { + Files.createDirectories(parent); + } + if (entry.isDirectory()) { + Files.createDirectory(destination); + } else if (entry.isSymbolicLink()) { + Files.createSymbolicLink(destination, Paths.get(entry.getLinkName())); + } else { + // copy the file from the archive using a small buffer to avoid heaping + Files.createFile(destination); + try (FileOutputStream fos = new FileOutputStream(destination.toFile())) { + tar.transferTo(fos); + } + } + if (entry.isSymbolicLink() == false) { + // check if the underlying file system supports POSIX permissions + final PosixFileAttributeView view = Files.getFileAttributeView(destination, PosixFileAttributeView.class); + if (view != null) { + final Set permissions = PosixFilePermissions.fromString( + permissions((entry.getMode() >> 6) & 07) + + permissions((entry.getMode() >> 3) & 07) + + permissions((entry.getMode() >> 0) & 07)); + Files.setPosixFilePermissions(destination, permissions); + } + } + entry = tar.getNextTarEntry(); + } + } catch (final IOException e) { + throw new GradleException("unable to extract tar [" + tarFile.getAsFile().get().toPath() + "]", e); + } + } + + private String permissions(final int permissions) { + if (permissions < 0 || permissions > 7) { + throw new IllegalArgumentException("permissions [" + permissions + "] out of range"); + } + final StringBuilder sb = new StringBuilder(3); + if ((permissions & 4) == 4) { + sb.append('r'); + } else { + sb.append('-'); + } + if ((permissions & 2) == 2) { + sb.append('w'); + } else { + sb.append('-'); + } + if ((permissions & 1) == 1) { + sb.append('x'); + } else { + sb.append('-'); + } + return sb.toString(); + } + +} diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/tar/SymoblicLinkPreservingTarPlugin.java b/buildSrc/src/main/java/org/elasticsearch/gradle/tar/SymoblicLinkPreservingTarPlugin.java new file mode 100644 index 00000000000..84f9d52ab76 --- /dev/null +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/tar/SymoblicLinkPreservingTarPlugin.java @@ -0,0 +1,32 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.gradle.tar; + +import org.gradle.api.Plugin; +import org.gradle.api.Project; + +public class SymoblicLinkPreservingTarPlugin implements Plugin { + + @Override + public void apply(final Project target) { + + } + +} diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java index c1baadaf0e2..d0aebc67d18 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java @@ -599,8 +599,11 @@ public class ElasticsearchNode implements TestClusterConfiguration { }) .collect(Collectors.joining(" ")); } - defaultEnv.put("ES_JAVA_OPTS", "-Xms512m -Xmx512m -ea -esa" + - systemPropertiesString + jvmArgsString + defaultEnv.put("ES_JAVA_OPTS", "-Xms512m -Xmx512m -ea -esa " + + systemPropertiesString + " " + + jvmArgsString + " " + + // Support passing in additional JVM arguments + System.getProperty("tests.jvm.argline", "") ); defaultEnv.put("ES_TMPDIR", tmpDir.toString()); // Windows requires this as it defaults to `c:\windows` despite ES_TMPDIR diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/tool/Boilerplate.java b/buildSrc/src/main/java/org/elasticsearch/gradle/tool/Boilerplate.java index 760e5f60f1c..f4ce626f7d6 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/tool/Boilerplate.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/tool/Boilerplate.java @@ -20,6 +20,7 @@ package org.elasticsearch.gradle.tool; import org.gradle.api.Action; import org.gradle.api.NamedDomainObjectContainer; +import org.gradle.api.PolymorphicDomainObjectContainer; import org.gradle.api.Project; import org.gradle.api.Task; import org.gradle.api.UnknownTaskException; @@ -52,6 +53,16 @@ public abstract class Boilerplate { } + public static T maybeCreate(PolymorphicDomainObjectContainer collection, String name, Class type, Action action) { + return Optional.ofNullable(collection.findByName(name)) + .orElseGet(() -> { + T result = collection.create(name, type); + action.execute(result); + return result; + }); + + } + public static TaskProvider maybeRegister(TaskContainer tasks, String name, Class clazz, Action action) { try { return tasks.named(name, clazz); diff --git a/buildSrc/src/main/resources/META-INF/gradle-plugins/elasticsearch.symbolic-link-preserving-tar.properties b/buildSrc/src/main/resources/META-INF/gradle-plugins/elasticsearch.symbolic-link-preserving-tar.properties new file mode 100644 index 00000000000..0ad0ead7dbd --- /dev/null +++ b/buildSrc/src/main/resources/META-INF/gradle-plugins/elasticsearch.symbolic-link-preserving-tar.properties @@ -0,0 +1 @@ +implementation-class=org.elasticsearch.gradle.tar.SymoblicLinkPreservingTarPlugin diff --git a/buildSrc/src/main/resources/minimumGradleVersion b/buildSrc/src/main/resources/minimumGradleVersion index e5e7441d3e9..475e1842948 100644 --- a/buildSrc/src/main/resources/minimumGradleVersion +++ 
b/buildSrc/src/main/resources/minimumGradleVersion @@ -1 +1 @@ -5.5 \ No newline at end of file +5.6.2 \ No newline at end of file diff --git a/buildSrc/src/minimumRuntime/java/org/elasticsearch/gradle/VersionProperties.java b/buildSrc/src/minimumRuntime/java/org/elasticsearch/gradle/VersionProperties.java index cdb0f01cf75..45ce676ce03 100644 --- a/buildSrc/src/minimumRuntime/java/org/elasticsearch/gradle/VersionProperties.java +++ b/buildSrc/src/minimumRuntime/java/org/elasticsearch/gradle/VersionProperties.java @@ -23,6 +23,10 @@ public class VersionProperties { return bundledJdk; } + public static String getBundledJdkVendor() { + return bundledJdkVendor; + } + public static Map getVersions() { return versions; } @@ -30,12 +34,14 @@ public class VersionProperties { private static final String elasticsearch; private static final String lucene; private static final String bundledJdk; + private static final String bundledJdkVendor; private static final Map versions = new HashMap(); static { Properties props = getVersionProperties(); elasticsearch = props.getProperty("elasticsearch"); lucene = props.getProperty("lucene"); + bundledJdkVendor = props.getProperty("bundled_jdk_vendor"); bundledJdk = props.getProperty("bundled_jdk"); for (String property : props.stringPropertyNames()) { diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/AdoptOpenJdkDownloadPluginIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/AdoptOpenJdkDownloadPluginIT.java new file mode 100644 index 00000000000..bbf1b63cf27 --- /dev/null +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/AdoptOpenJdkDownloadPluginIT.java @@ -0,0 +1,59 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.gradle; + +import java.io.IOException; +import java.io.InputStream; + +public class AdoptOpenJdkDownloadPluginIT extends JdkDownloadPluginIT { + + @Override + public String oldJdkVersion() { + return "1+99"; + } + + @Override + public String jdkVersion() { + return "12.0.2+10"; + } + + @Override + public String jdkVendor() { + return "adoptopenjdk"; + } + + @Override + protected String urlPath(final boolean isOld, final String platform, final String extension) { + final String module = platform.equals("osx") ? "mac" : platform; + if (isOld) { + return "/adoptopenjdk/OpenJDK1U-jdk_x64_" + module + "_hotspot_1_99." + extension; + } else { + return "/adoptopenjdk/OpenJDK12U-jdk_x64_" + module + "_hotspot_12.0.2_10." + extension; + } + } + + @Override + protected byte[] filebytes(final String platform, final String extension) throws IOException { + try (InputStream stream = JdkDownloadPluginIT.class.getResourceAsStream("fake_adoptopenjdk_" + platform + "." 
+ extension)) { + return stream.readAllBytes(); + } + } + +} diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/JdkDownloadPluginIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/JdkDownloadPluginIT.java index 9d612da610a..e31ce72d89a 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/JdkDownloadPluginIT.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/JdkDownloadPluginIT.java @@ -25,7 +25,6 @@ import org.gradle.testkit.runner.BuildResult; import org.gradle.testkit.runner.GradleRunner; import java.io.IOException; -import java.io.InputStream; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; @@ -39,74 +38,79 @@ import static com.github.tomakehurst.wiremock.client.WireMock.head; import static com.github.tomakehurst.wiremock.client.WireMock.urlEqualTo; import static org.hamcrest.CoreMatchers.equalTo; -public class JdkDownloadPluginIT extends GradleIntegrationTestCase { +public abstract class JdkDownloadPluginIT extends GradleIntegrationTestCase { - private static final String OLD_JDK_VERSION = "1+99"; - private static final String JDK_VERSION = "12.0.1+99@123456789123456789123456789abcde"; private static final Pattern JDK_HOME_LOGLINE = Pattern.compile("JDK HOME: (.*)"); private static final Pattern NUM_CONFIGS_LOGLINE = Pattern.compile("NUM CONFIGS: (.*)"); - public void testLinuxExtraction() throws IOException { - assertExtraction("getLinuxJdk", "linux", "bin/java", JDK_VERSION); + protected abstract String oldJdkVersion(); + + protected abstract String jdkVersion(); + + protected abstract String jdkVendor(); + + public final void testLinuxExtraction() throws IOException { + assertExtraction("getLinuxJdk", "linux", "bin/java", jdkVendor(), jdkVersion()); } - public void testDarwinExtraction() throws IOException { - assertExtraction("getDarwinJdk", "osx", "Contents/Home/bin/java", JDK_VERSION); + public final void testDarwinExtraction() throws IOException { + assertExtraction("getDarwinJdk", "osx", "Contents/Home/bin/java", jdkVendor(), jdkVersion()); } - public void testWindowsExtraction() throws IOException { - assertExtraction("getWindowsJdk", "windows", "bin/java", JDK_VERSION); + public final void testWindowsExtraction() throws IOException { + assertExtraction("getWindowsJdk", "windows", "bin/java", jdkVendor(), jdkVersion()); } - public void testLinuxExtractionOldVersion() throws IOException { - assertExtraction("getLinuxJdk", "linux", "bin/java", OLD_JDK_VERSION); + public final void testLinuxExtractionOldVersion() throws IOException { + assertExtraction("getLinuxJdk", "linux", "bin/java", jdkVendor(), oldJdkVersion()); } - public void testDarwinExtractionOldVersion() throws IOException { - assertExtraction("getDarwinJdk", "osx", "Contents/Home/bin/java", OLD_JDK_VERSION); + public final void testDarwinExtractionOldVersion() throws IOException { + assertExtraction("getDarwinJdk", "osx", "Contents/Home/bin/java", jdkVendor(), oldJdkVersion()); } - public void testWindowsExtractionOldVersion() throws IOException { - assertExtraction("getWindowsJdk", "windows", "bin/java", OLD_JDK_VERSION); + public final void testWindowsExtractionOldVersion() throws IOException { + assertExtraction("getWindowsJdk", "windows", "bin/java", jdkVendor(), oldJdkVersion()); } - public void testCrossProjectReuse() throws IOException { + public final void testCrossProjectReuse() throws IOException { runBuild("numConfigurations", "linux", result -> { Matcher matcher = NUM_CONFIGS_LOGLINE.matcher(result.getOutput()); assertTrue("could not find 
num configs in output: " + result.getOutput(), matcher.find()); assertThat(Integer.parseInt(matcher.group(1)), equalTo(6)); // 3 import configs, 3 export configs - }, JDK_VERSION); + }, jdkVendor(), jdkVersion()); } - public void assertExtraction(String taskname, String platform, String javaBin, String version) throws IOException { + private void assertExtraction(String taskname, String platform, String javaBin, String vendor, String version) throws IOException { runBuild(taskname, platform, result -> { Matcher matcher = JDK_HOME_LOGLINE.matcher(result.getOutput()); assertTrue("could not find jdk home in output: " + result.getOutput(), matcher.find()); String jdkHome = matcher.group(1); Path javaPath = Paths.get(jdkHome, javaBin); assertTrue(javaPath.toString(), Files.exists(javaPath)); - }, version); + }, vendor, version); } - private void runBuild(String taskname, String platform, Consumer assertions, String version) throws IOException { + protected abstract String urlPath(boolean isOld, String platform, String extension); + + protected abstract byte[] filebytes(String platform, String extension) throws IOException; + + private void runBuild( + String taskname, String platform, Consumer assertions, String vendor, String version) throws IOException { WireMockServer wireMock = new WireMockServer(0); try { String extension = platform.equals("windows") ? "zip" : "tar.gz"; - boolean isOld = version.equals(OLD_JDK_VERSION); - String filename = "openjdk-" + (isOld ? "1" : "12.0.1") + "_" + platform + "-x64_bin." + extension; - final byte[] filebytes; - try (InputStream stream = JdkDownloadPluginIT.class.getResourceAsStream("fake_openjdk_" + platform + "." + extension)) { - filebytes = stream.readAllBytes(); - } - String versionPath = isOld ? "jdk1/99" : "jdk12.0.1/123456789123456789123456789abcde/99"; - String urlPath = "/java/GA/" + versionPath + "/GPL/" + filename; - wireMock.stubFor(head(urlEqualTo(urlPath)).willReturn(aResponse().withStatus(200))); - wireMock.stubFor(get(urlEqualTo(urlPath)).willReturn(aResponse().withStatus(200).withBody(filebytes))); + boolean isOld = version.equals(oldJdkVersion()); + + wireMock.stubFor(head(urlEqualTo(urlPath(isOld, platform, extension))).willReturn(aResponse().withStatus(200))); + wireMock.stubFor(get(urlEqualTo(urlPath(isOld, platform, extension))) + .willReturn(aResponse().withStatus(200).withBody(filebytes(platform, extension)))); wireMock.start(); GradleRunner runner = GradleRunner.create().withProjectDir(getProjectDir("jdk-download")) .withArguments(taskname, "-Dlocal.repo.path=" + getLocalTestRepoPath(), + "-Dtests.jdk_vendor=" + vendor, "-Dtests.jdk_version=" + version, "-Dtests.jdk_repo=" + wireMock.baseUrl(), "-i") diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/JdkDownloadPluginTests.java b/buildSrc/src/test/java/org/elasticsearch/gradle/JdkDownloadPluginTests.java index c6ca817e759..e3206e90a9f 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/JdkDownloadPluginTests.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/JdkDownloadPluginTests.java @@ -35,32 +35,50 @@ public class JdkDownloadPluginTests extends GradleUnitTestCase { rootProject = ProjectBuilder.builder().build(); } + public void testMissingVendor() { + assertJdkError(createProject(), "testjdk", null, "11.0.2+33", "linux", "vendor not specified for jdk [testjdk]"); + } + + public void testUnknownVendor() { + assertJdkError( + createProject(), + "testjdk", + "unknown", + "11.0.2+33", + "linux", + "unknown vendor [unknown] for jdk [testjdk], must be 
one of [adoptopenjdk, openjdk]"); + } + public void testMissingVersion() { - assertJdkError(createProject(), "testjdk", null, "linux", "version not specified for jdk [testjdk]"); - } - - public void testMissingPlatform() { - assertJdkError(createProject(), "testjdk", "11.0.2+33", null, "platform not specified for jdk [testjdk]"); - } - - public void testUnknownPlatform() { - assertJdkError(createProject(), "testjdk", "11.0.2+33", "unknown", - "unknown platform [unknown] for jdk [testjdk], must be one of [linux, windows, darwin]"); + assertJdkError(createProject(), "testjdk", "openjdk", null, "linux", "version not specified for jdk [testjdk]"); } public void testBadVersionFormat() { - assertJdkError(createProject(), "testjdk", "badversion", "linux", "malformed version [badversion] for jdk [testjdk]"); + assertJdkError(createProject(), "testjdk", "openjdk", "badversion", "linux", "malformed version [badversion] for jdk [testjdk]"); } - private void assertJdkError(Project project, String name, String version, String platform, String message) { - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> createJdk(project, name, version, platform)); + public void testMissingPlatform() { + assertJdkError(createProject(), "testjdk", "openjdk", "11.0.2+33", null, "platform not specified for jdk [testjdk]"); + } + + public void testUnknownPlatform() { + assertJdkError(createProject(), "testjdk", "openjdk", "11.0.2+33", "unknown", + "unknown platform [unknown] for jdk [testjdk], must be one of [darwin, linux, windows]"); + } + + private void assertJdkError(Project project, String name, String vendor, String version, String platform, String message) { + IllegalArgumentException e = + expectThrows(IllegalArgumentException.class, () -> createJdk(project, name, vendor, version, platform)); assertThat(e.getMessage(), equalTo(message)); } - private void createJdk(Project project, String name, String version, String platform) { + private void createJdk(Project project, String name, String vendor, String version, String platform) { @SuppressWarnings("unchecked") NamedDomainObjectContainer jdks = (NamedDomainObjectContainer) project.getExtensions().getByName("jdks"); jdks.create(name, jdk -> { + if (vendor != null) { + jdk.setVendor(vendor); + } if (version != null) { jdk.setVersion(version); } diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/OpenJdkDownloadPluginIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/OpenJdkDownloadPluginIT.java new file mode 100644 index 00000000000..b01a4b022fe --- /dev/null +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/OpenJdkDownloadPluginIT.java @@ -0,0 +1,56 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.gradle; + +import java.io.IOException; +import java.io.InputStream; + +public class OpenJdkDownloadPluginIT extends JdkDownloadPluginIT { + + @Override + public String oldJdkVersion() { + return "1+99"; + } + + @Override + public String jdkVersion() { + return "12.0.1+99@123456789123456789123456789abcde"; + } + + @Override + protected String jdkVendor() { + return "openjdk"; + } + + @Override + protected String urlPath(final boolean isOld, final String platform, final String extension) { + final String versionPath = isOld ? "jdk1/99" : "jdk12.0.1/123456789123456789123456789abcde/99"; + final String filename = "openjdk-" + (isOld ? "1" : "12.0.1") + "_" + platform + "-x64_bin." + extension; + return "/java/GA/" + versionPath + "/GPL/" + filename; + } + + @Override + protected byte[] filebytes(final String platform, final String extension) throws IOException { + try (InputStream stream = JdkDownloadPluginIT.class.getResourceAsStream("fake_openjdk_" + platform + "." + extension)) { + return stream.readAllBytes(); + } + } + +} diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/tar/SymbolicLinkPreservingTarIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/tar/SymbolicLinkPreservingTarIT.java new file mode 100644 index 00000000000..c7b59f64252 --- /dev/null +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/tar/SymbolicLinkPreservingTarIT.java @@ -0,0 +1,157 @@ +package org.elasticsearch.gradle.tar; + +import org.apache.commons.compress.archivers.tar.TarArchiveEntry; +import org.apache.commons.compress.archivers.tar.TarArchiveInputStream; +import org.apache.commons.compress.compressors.bzip2.BZip2CompressorInputStream; +import org.apache.commons.compress.compressors.gzip.GzipCompressorInputStream; +import org.elasticsearch.gradle.test.GradleIntegrationTestCase; +import org.gradle.api.GradleException; +import org.gradle.testkit.runner.GradleRunner; +import org.junit.Before; +import org.junit.Rule; +import org.junit.rules.TemporaryFolder; + +import java.io.File; +import java.io.FileInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; + +import static org.hamcrest.CoreMatchers.anyOf; +import static org.hamcrest.CoreMatchers.equalTo; + +public class SymbolicLinkPreservingTarIT extends GradleIntegrationTestCase { + + @Rule + public final TemporaryFolder temporaryFolder = new TemporaryFolder(); + + @Before + public void before() throws IOException { + final Path realFolder = temporaryFolder.getRoot().toPath().resolve("real-folder"); + Files.createDirectory(realFolder); + Files.createFile(realFolder.resolve("file")); + Files.createSymbolicLink(realFolder.resolve("link-to-file"), Paths.get("./file")); + final Path linkInFolder = temporaryFolder.getRoot().toPath().resolve("link-in-folder"); + Files.createDirectory(linkInFolder); + Files.createSymbolicLink(linkInFolder.resolve("link-to-file"), Paths.get("../real-folder/file")); + final Path linkToRealFolder = temporaryFolder.getRoot().toPath().resolve("link-to-real-folder"); + Files.createSymbolicLink(linkToRealFolder, Paths.get("./real-folder")); + } + + public void testBZip2Tar() throws IOException { + runBuild("buildBZip2Tar", true); + assertTar(".bz2", BZip2CompressorInputStream::new, true); + } + + public void testBZip2TarDoNotPreserveFileTimestamps() throws IOException { + runBuild("buildBZip2Tar", false); + assertTar(".bz2", BZip2CompressorInputStream::new, false); + } + + public void 
testGZipTar() throws IOException { + runBuild("buildGZipTar", true); + assertTar(".gz", GzipCompressorInputStream::new, true); + } + + public void testGZipTarDoNotPreserveFileTimestamps() throws IOException { + runBuild("buildGZipTar", false); + assertTar(".gz", GzipCompressorInputStream::new, false); + } + + public void testTar() throws IOException { + runBuild("buildTar", true); + assertTar("", fis -> fis, true); + } + + public void testTarDoNotPreserveFileTimestamps() throws IOException { + runBuild("buildTar", false); + assertTar("", fis -> fis, false); + } + + interface FileInputStreamWrapper { + InputStream apply(FileInputStream fis) throws IOException; + } + + private void assertTar( + final String extension, final FileInputStreamWrapper wrapper, boolean preserveFileTimestamps) throws IOException { + try (TarArchiveInputStream tar = new TarArchiveInputStream(wrapper.apply(new FileInputStream(getOutputFile(extension))))) { + TarArchiveEntry entry = tar.getNextTarEntry(); + boolean realFolderEntry = false; + boolean fileEntry = false; + boolean linkToFileEntry = false; + boolean linkInFolderEntry = false; + boolean linkInFolderLinkToFileEntry = false; + boolean linkToRealFolderEntry = false; + while (entry != null) { + if (entry.getName().equals("real-folder/")) { + assertTrue(entry.isDirectory()); + realFolderEntry = true; + } else if (entry.getName().equals("real-folder/file")) { + assertTrue(entry.isFile()); + fileEntry = true; + } else if (entry.getName().equals("real-folder/link-to-file")) { + assertTrue(entry.isSymbolicLink()); + assertThat( + entry.getLinkName(), + anyOf(equalTo("./file"), equalTo(".\\file")) + ); + linkToFileEntry = true; + } else if (entry.getName().equals("link-in-folder/")) { + assertTrue(entry.isDirectory()); + linkInFolderEntry = true; + } else if (entry.getName().equals("link-in-folder/link-to-file")) { + assertTrue(entry.isSymbolicLink()); + assertThat( + entry.getLinkName(), + anyOf(equalTo("../real-folder/file"), equalTo("..\\real-folder\\file")) + ); + linkInFolderLinkToFileEntry = true; + } else if (entry.getName().equals("link-to-real-folder")) { + assertTrue(entry.isSymbolicLink()); + assertThat( + entry.getLinkName(), + anyOf(equalTo("./real-folder"), equalTo(".\\real-folder")) + ); + linkToRealFolderEntry = true; + } else { + throw new GradleException("unexpected entry [" + entry.getName() + "]"); + } + if (preserveFileTimestamps) { + assertTrue(entry.getModTime().getTime() > 0); + } else { + assertThat(entry.getModTime().getTime(), equalTo(0L)); + } + entry = tar.getNextTarEntry(); + } + assertTrue(realFolderEntry); + assertTrue(fileEntry); + assertTrue(linkToFileEntry); + assertTrue(linkInFolderEntry); + assertTrue(linkInFolderLinkToFileEntry); + assertTrue(linkToRealFolderEntry); + } + } + + private void runBuild(final String task, final boolean preserveFileTimestamps) { + final GradleRunner runner = GradleRunner.create().withProjectDir(getProjectDir()) + .withArguments( + task, + "-Dtests.symbolic_link_preserving_tar_source=" + temporaryFolder.getRoot().toString(), + "-Dtests.symbolic_link_preserving_tar_preserve_file_timestamps=" + preserveFileTimestamps, + "-i") + .withPluginClasspath(); + + runner.build(); + } + + private File getProjectDir() { + return getProjectDir("symbolic-link-preserving-tar"); + } + + private File getOutputFile(final String extension) { + return getProjectDir().toPath().resolve("build/distributions/symbolic-link-preserving-tar.tar" + extension).toFile(); + } + +} diff --git 
a/buildSrc/src/test/resources/org/elasticsearch/gradle/fake_adoptopenjdk_linux.tar.gz b/buildSrc/src/test/resources/org/elasticsearch/gradle/fake_adoptopenjdk_linux.tar.gz new file mode 100644 index 00000000000..d38b03a4c2a Binary files /dev/null and b/buildSrc/src/test/resources/org/elasticsearch/gradle/fake_adoptopenjdk_linux.tar.gz differ diff --git a/buildSrc/src/test/resources/org/elasticsearch/gradle/fake_adoptopenjdk_osx.tar.gz b/buildSrc/src/test/resources/org/elasticsearch/gradle/fake_adoptopenjdk_osx.tar.gz new file mode 100644 index 00000000000..10c8f6e8065 Binary files /dev/null and b/buildSrc/src/test/resources/org/elasticsearch/gradle/fake_adoptopenjdk_osx.tar.gz differ diff --git a/buildSrc/src/test/resources/org/elasticsearch/gradle/fake_adoptopenjdk_windows.zip b/buildSrc/src/test/resources/org/elasticsearch/gradle/fake_adoptopenjdk_windows.zip new file mode 100644 index 00000000000..61b6b867397 Binary files /dev/null and b/buildSrc/src/test/resources/org/elasticsearch/gradle/fake_adoptopenjdk_windows.zip differ diff --git a/buildSrc/src/testKit/jdk-download/build.gradle b/buildSrc/src/testKit/jdk-download/build.gradle index eb2aa0260a3..0bf28ec4c85 100644 --- a/buildSrc/src/testKit/jdk-download/build.gradle +++ b/buildSrc/src/testKit/jdk-download/build.gradle @@ -2,9 +2,11 @@ project.gradle.projectsEvaluated { // wire the jdk repo to wiremock String fakeJdkRepo = Objects.requireNonNull(System.getProperty('tests.jdk_repo')) + String fakeJdkVendor = Objects.requireNonNull(System.getProperty('tests.jdk_vendor')) String fakeJdkVersion = Objects.requireNonNull(System.getProperty('tests.jdk_version')) println rootProject.repositories.asMap.keySet() - IvyArtifactRepository repository = (IvyArtifactRepository) rootProject.repositories.getByName("jdk_repo_${fakeJdkVersion}") + IvyArtifactRepository repository = + (IvyArtifactRepository) rootProject.repositories.getByName("jdk_repo_${fakeJdkVendor}_${fakeJdkVersion}") repository.setUrl(fakeJdkRepo) } diff --git a/buildSrc/src/testKit/jdk-download/reuse/build.gradle b/buildSrc/src/testKit/jdk-download/reuse/build.gradle index 8a26a8121e9..b04ad78db81 100644 --- a/buildSrc/src/testKit/jdk-download/reuse/build.gradle +++ b/buildSrc/src/testKit/jdk-download/reuse/build.gradle @@ -1,8 +1,10 @@ evaluationDependsOn ':subproj' +String fakeJdkVendor = Objects.requireNonNull(System.getProperty('tests.jdk_vendor')) String fakeJdkVersion = Objects.requireNonNull(System.getProperty('tests.jdk_version')) jdks { linux_jdk { + vendor = fakeJdkVendor version = fakeJdkVersion platform = "linux" } diff --git a/buildSrc/src/testKit/jdk-download/subproj/build.gradle b/buildSrc/src/testKit/jdk-download/subproj/build.gradle index 8e8b5435b4a..399f8d587ed 100644 --- a/buildSrc/src/testKit/jdk-download/subproj/build.gradle +++ b/buildSrc/src/testKit/jdk-download/subproj/build.gradle @@ -3,17 +3,21 @@ plugins { } +String fakeJdkVendor = Objects.requireNonNull(System.getProperty('tests.jdk_vendor')) String fakeJdkVersion = Objects.requireNonNull(System.getProperty('tests.jdk_version')) jdks { linux { + vendor = fakeJdkVendor version = fakeJdkVersion platform = "linux" } darwin { + vendor = fakeJdkVendor version = fakeJdkVersion platform = "darwin" } windows { + vendor = fakeJdkVendor version = fakeJdkVersion platform = "windows" } diff --git a/buildSrc/src/testKit/symbolic-link-preserving-tar/build.gradle b/buildSrc/src/testKit/symbolic-link-preserving-tar/build.gradle new file mode 100644 index 00000000000..5496f22478d --- /dev/null +++ 
b/buildSrc/src/testKit/symbolic-link-preserving-tar/build.gradle @@ -0,0 +1,53 @@ +import org.elasticsearch.gradle.tar.SymbolicLinkPreservingTar + +plugins { + id 'base' + id 'distribution' + id 'elasticsearch.symbolic-link-preserving-tar' +} + +final String source = Objects.requireNonNull(System.getProperty('tests.symbolic_link_preserving_tar_source')) +boolean preserveFileTimestamps; +final String testPreserveFileTimestamps = + Objects.requireNonNull(System.getProperty('tests.symbolic_link_preserving_tar_preserve_file_timestamps')) +switch (testPreserveFileTimestamps) { + case "true": + preserveFileTimestamps = true + break + case "false": + preserveFileTimestamps = false + break + default: + throw new IllegalArgumentException( + "tests.symbolic_link_preserving_tar_preserve_file_timestamps must be [true] or [false] but was [" + + testPreserveFileTimestamps + "]") +} + +task buildBZip2Tar(type: SymbolicLinkPreservingTar) { SymbolicLinkPreservingTar tar -> + tar.archiveExtension = 'tar.bz2' + tar.compression = Compression.BZIP2 + tar.preserveFileTimestamps = preserveFileTimestamps + from fileTree(source) + doLast { + println archiveFile.get().asFile.path + } +} + +task buildGZipTar(type: SymbolicLinkPreservingTar) { SymbolicLinkPreservingTar tar -> + tar.archiveExtension = 'tar.gz' + tar.compression = Compression.GZIP + tar.preserveFileTimestamps = preserveFileTimestamps + from fileTree(source) + doLast{ + println archiveFile.get().asFile.path + } +} + +task buildTar(type: SymbolicLinkPreservingTar) { SymbolicLinkPreservingTar tar -> + tar.archiveExtension = 'tar' + tar.preserveFileTimestamps = preserveFileTimestamps + from fileTree(source) + doLast{ + println archiveFile.get().asFile.path + } +} diff --git a/buildSrc/src/testKit/symbolic-link-preserving-tar/settings.gradle b/buildSrc/src/testKit/symbolic-link-preserving-tar/settings.gradle new file mode 100644 index 00000000000..e69de29bb2d diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 12ff2f8cda7..9aec1cbfe2b 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,7 +1,8 @@ elasticsearch = 7.5.0 lucene = 8.2.0 -bundled_jdk = 12.0.2+10@e482c34c86bd4bf8b56c0b35558996b9 +bundled_jdk_vendor = adoptopenjdk +bundled_jdk = 13+33 # optional dependencies spatial4j = 0.7 @@ -32,8 +33,8 @@ bouncycastle = 1.61 # test dependencies randomizedrunner = 2.7.1 junit = 4.12 -httpclient = 4.5.8 -httpcore = 4.4.11 +httpclient = 4.5.10 +httpcore = 4.4.12 httpasyncclient = 4.1.4 commonslogging = 1.1.3 commonscodec = 1.11 diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/CcrClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/CcrClient.java index 373b94124d4..daa7c54b7fe 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/CcrClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/CcrClient.java @@ -85,15 +85,15 @@ public final class CcrClient { * * See * the docs for more. - * * @param request the request * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void putFollowAsync(PutFollowRequest request, - RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity( + public Cancellable putFollowAsync(PutFollowRequest request, + RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity( request, CcrRequestConverters::putFollow, options, @@ -129,15 +129,15 @@ public final class CcrClient { * * See * the docs for more. - * * @param request the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void pauseFollowAsync(PauseFollowRequest request, - RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity( + public Cancellable pauseFollowAsync(PauseFollowRequest request, + RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity( request, CcrRequestConverters::pauseFollow, options, @@ -172,15 +172,15 @@ public final class CcrClient { * * See * the docs for more. - * * @param request the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void resumeFollowAsync(ResumeFollowRequest request, - RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity( + public Cancellable resumeFollowAsync(ResumeFollowRequest request, + RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity( request, CcrRequestConverters::resumeFollow, options, @@ -217,15 +217,15 @@ public final class CcrClient { * * See * the docs for more. - * * @param request the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void unfollowAsync(UnfollowRequest request, - RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity( + public Cancellable unfollowAsync(UnfollowRequest request, + RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity( request, CcrRequestConverters::unfollow, options, @@ -260,15 +260,15 @@ public final class CcrClient { * * See the docs for more details * on the intended usage of this API. - * * @param request the request * @param options the request options (e.g., headers), use {@link RequestOptions#DEFAULT} if the defaults are acceptable. 
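Across CcrClient, ClusterClient, DataFrameClient and IndexLifecycleClient this change switches the async methods from returning void to returning Cancellable, as the added "@return cancellable" tags describe. A minimal caller-side sketch of the resulting pattern, for illustration only and assuming an already-built RestHighLevelClient named client (the cluster health API, with the same ClusterHealthRequest/ClusterHealthResponse types used in the ClusterClient hunk below, stands in for any of the async methods touched here):

    ClusterHealthRequest healthRequest = new ClusterHealthRequest();
    // The async call now hands back a Cancellable instead of returning void.
    Cancellable cancellable = client.cluster().healthAsync(
        healthRequest,
        RequestOptions.DEFAULT,
        new ActionListener<ClusterHealthResponse>() {
            @Override
            public void onResponse(ClusterHealthResponse response) {
                // handle a successful response
            }

            @Override
            public void onFailure(Exception e) {
                // handle the failure
            }
        });
    // Later, if the caller no longer needs the result, the in-flight request can be aborted.
    cancellable.cancel();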
+ * @return cancellable that may be used to cancel the request */ - public void forgetFollowerAsync( + public Cancellable forgetFollowerAsync( final ForgetFollowerRequest request, final RequestOptions options, final ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity( + return restHighLevelClient.performRequestAsyncAndParseEntity( request, CcrRequestConverters::forgetFollower, options, @@ -303,15 +303,15 @@ public final class CcrClient { * * See * the docs for more. - * * @param request the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void putAutoFollowPatternAsync(PutAutoFollowPatternRequest request, - RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity( + public Cancellable putAutoFollowPatternAsync(PutAutoFollowPatternRequest request, + RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity( request, CcrRequestConverters::putAutoFollowPattern, options, @@ -347,15 +347,15 @@ public final class CcrClient { * * See * the docs for more. - * * @param request the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void deleteAutoFollowPatternAsync(DeleteAutoFollowPatternRequest request, - RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity( + public Cancellable deleteAutoFollowPatternAsync(DeleteAutoFollowPatternRequest request, + RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity( request, CcrRequestConverters::deleteAutoFollowPattern, options, @@ -392,15 +392,15 @@ public final class CcrClient { * * See * the docs for more. - * * @param request the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void getAutoFollowPatternAsync(GetAutoFollowPatternRequest request, - RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity( + public Cancellable getAutoFollowPatternAsync(GetAutoFollowPatternRequest request, + RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity( request, CcrRequestConverters::getAutoFollowPattern, options, @@ -437,14 +437,14 @@ public final class CcrClient { * * See * the docs for more. - * * @param request the request * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return cancellable that may be used to cancel the request */ - public void getCcrStatsAsync(CcrStatsRequest request, - RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity( + public Cancellable getCcrStatsAsync(CcrStatsRequest request, + RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity( request, CcrRequestConverters::getCcrStats, options, @@ -481,14 +481,14 @@ public final class CcrClient { * * See * the docs for more. - * * @param request the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return cancellable that may be used to cancel the request */ - public void getFollowStatsAsync(FollowStatsRequest request, - RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity( + public Cancellable getFollowStatsAsync(FollowStatsRequest request, + RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity( request, CcrRequestConverters::getFollowStats, options, @@ -524,14 +524,14 @@ public final class CcrClient { * * See * the docs for more. - * * @param request the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return cancellable that may be used to cancel the request */ - public void getFollowInfoAsync(FollowInfoRequest request, - RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity( + public Cancellable getFollowInfoAsync(FollowInfoRequest request, + RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity( request, CcrRequestConverters::getFollowInfo, options, diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ClusterClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ClusterClient.java index f9b1474c69a..5e99975f514 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ClusterClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ClusterClient.java @@ -67,10 +67,12 @@ public final class ClusterClient { * @param clusterUpdateSettingsRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void putSettingsAsync(ClusterUpdateSettingsRequest clusterUpdateSettingsRequest, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(clusterUpdateSettingsRequest, ClusterRequestConverters::clusterPutSettings, + public Cancellable putSettingsAsync(ClusterUpdateSettingsRequest clusterUpdateSettingsRequest, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(clusterUpdateSettingsRequest, + ClusterRequestConverters::clusterPutSettings, options, ClusterUpdateSettingsResponse::fromXContent, listener, emptySet()); } @@ -96,10 +98,12 @@ public final class ClusterClient { * @param clusterGetSettingsRequest the request * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void getSettingsAsync(ClusterGetSettingsRequest clusterGetSettingsRequest, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(clusterGetSettingsRequest, ClusterRequestConverters::clusterGetSettings, + public Cancellable getSettingsAsync(ClusterGetSettingsRequest clusterGetSettingsRequest, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity( + clusterGetSettingsRequest, ClusterRequestConverters::clusterGetSettings, options, ClusterGetSettingsResponse::fromXContent, listener, emptySet()); } @@ -127,9 +131,11 @@ public final class ClusterClient { * @param healthRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void healthAsync(ClusterHealthRequest healthRequest, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(healthRequest, ClusterRequestConverters::clusterHealth, options, + public Cancellable healthAsync(ClusterHealthRequest healthRequest, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(healthRequest, ClusterRequestConverters::clusterHealth, options, ClusterHealthResponse::fromXContent, listener, singleton(RestStatus.REQUEST_TIMEOUT.getStatus())); } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/DataFrameClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/DataFrameClient.java index f5104c16022..e56bd442292 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/DataFrameClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/DataFrameClient.java @@ -21,20 +21,20 @@ package org.elasticsearch.client; import org.elasticsearch.action.ActionListener; import org.elasticsearch.client.core.AcknowledgedResponse; -import org.elasticsearch.client.dataframe.DeleteDataFrameTransformRequest; -import org.elasticsearch.client.dataframe.GetDataFrameTransformRequest; -import org.elasticsearch.client.dataframe.GetDataFrameTransformResponse; -import org.elasticsearch.client.dataframe.GetDataFrameTransformStatsRequest; -import org.elasticsearch.client.dataframe.GetDataFrameTransformStatsResponse; -import org.elasticsearch.client.dataframe.PreviewDataFrameTransformRequest; -import org.elasticsearch.client.dataframe.PreviewDataFrameTransformResponse; -import org.elasticsearch.client.dataframe.PutDataFrameTransformRequest; -import org.elasticsearch.client.dataframe.StartDataFrameTransformRequest; -import org.elasticsearch.client.dataframe.StartDataFrameTransformResponse; -import org.elasticsearch.client.dataframe.StopDataFrameTransformRequest; -import org.elasticsearch.client.dataframe.StopDataFrameTransformResponse; -import org.elasticsearch.client.dataframe.UpdateDataFrameTransformRequest; -import org.elasticsearch.client.dataframe.UpdateDataFrameTransformResponse; +import org.elasticsearch.client.transform.DeleteDataFrameTransformRequest; +import org.elasticsearch.client.transform.GetDataFrameTransformRequest; +import 
org.elasticsearch.client.transform.GetDataFrameTransformResponse; +import org.elasticsearch.client.transform.GetDataFrameTransformStatsRequest; +import org.elasticsearch.client.transform.GetDataFrameTransformStatsResponse; +import org.elasticsearch.client.transform.PreviewDataFrameTransformRequest; +import org.elasticsearch.client.transform.PreviewDataFrameTransformResponse; +import org.elasticsearch.client.transform.PutDataFrameTransformRequest; +import org.elasticsearch.client.transform.StartDataFrameTransformRequest; +import org.elasticsearch.client.transform.StartDataFrameTransformResponse; +import org.elasticsearch.client.transform.StopDataFrameTransformRequest; +import org.elasticsearch.client.transform.StopDataFrameTransformResponse; +import org.elasticsearch.client.transform.UpdateDataFrameTransformRequest; +import org.elasticsearch.client.transform.UpdateDataFrameTransformResponse; import java.io.IOException; import java.util.Collections; @@ -48,14 +48,14 @@ public final class DataFrameClient { } /** - * Creates a new Data Frame Transform + * Creates a new transform *

* For additional info - * see - * Create data frame transform documentation + * see + * Create transform documentation * * @param request The PutDataFrameTransformRequest containing the - * {@link org.elasticsearch.client.dataframe.transforms.DataFrameTransformConfig}. + * {@link org.elasticsearch.client.transform.transforms.DataFrameTransformConfig}. * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @return An AcknowledgedResponse object indicating request success * @throws IOException when there is a serialization issue sending the request or receiving the response @@ -69,20 +69,20 @@ public final class DataFrameClient { } /** - * Creates a new Data Frame Transform asynchronously and notifies listener on completion + * Creates a new transform asynchronously and notifies listener on completion *

* For additional info - * see - * Create data frame transform documentation - * + * see + * Create transform documentation * @param request The PutDataFrameTransformRequest containing the - * {@link org.elasticsearch.client.dataframe.transforms.DataFrameTransformConfig}. + * {@link org.elasticsearch.client.transform.transforms.DataFrameTransformConfig}. * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener Listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void putDataFrameTransformAsync(PutDataFrameTransformRequest request, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, + public Cancellable putDataFrameTransformAsync(PutDataFrameTransformRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, DataFrameRequestConverters::putDataFrameTransform, options, AcknowledgedResponse::fromXContent, @@ -91,14 +91,14 @@ public final class DataFrameClient { } /** - * Updates an existing Data Frame Transform + * Updates an existing transform *

* For additional info - * see - * Create data frame transform documentation + * see + * Create transform documentation * * @param request The UpdateDataFrameTransformRequest containing the - * {@link org.elasticsearch.client.dataframe.transforms.DataFrameTransformConfigUpdate}. + * {@link org.elasticsearch.client.transform.transforms.DataFrameTransformConfigUpdate}. * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @return An UpdateDataFrameTransformResponse object containing the updated configuration * @throws IOException when there is a serialization issue sending the request or receiving the response @@ -113,21 +113,21 @@ public final class DataFrameClient { } /** - * Updates an existing Data Frame Transform asynchronously and notifies listener on completion + * Updates an existing transform asynchronously and notifies listener on completion *

* For additional info - * see - * Create data frame transform documentation - * + * see + * Create transform documentation * @param request The UpdateDataFrameTransformRequest containing the - * {@link org.elasticsearch.client.dataframe.transforms.DataFrameTransformConfigUpdate}. + * {@link org.elasticsearch.client.transform.transforms.DataFrameTransformConfigUpdate}. * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener Listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void updateDataFrameTransformAsync(UpdateDataFrameTransformRequest request, - RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, + public Cancellable updateDataFrameTransformAsync(UpdateDataFrameTransformRequest request, + RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, DataFrameRequestConverters::updateDataFrameTransform, options, UpdateDataFrameTransformResponse::fromXContent, @@ -136,15 +136,15 @@ public final class DataFrameClient { } /** - * Get the running statistics of a Data Frame Transform + * Get the running statistics of a transform *

* For additional info - * see - * Get data frame transform stats documentation + * see + * Get transform stats documentation * * @param request Specifies which transforms to get the stats for * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized - * @return The Data Frame Transform stats + * @return The transform stats * @throws IOException when there is a serialization issue sending the request or receiving the response */ public GetDataFrameTransformStatsResponse getDataFrameTransformStats(GetDataFrameTransformStatsRequest request, RequestOptions options) @@ -157,19 +157,19 @@ public final class DataFrameClient { } /** - * Get the running statistics of a Data Frame Transform asynchronously and notifies listener on completion + * Get the running statistics of a transform asynchronously and notifies listener on completion *

* For additional info - * see - * Get data frame transform stats documentation - * + * see + * Get transform stats documentation * @param request Specifies which transforms to get the stats for * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener Listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void getDataFrameTransformStatsAsync(GetDataFrameTransformStatsRequest request, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, + public Cancellable getDataFrameTransformStatsAsync(GetDataFrameTransformStatsRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, DataFrameRequestConverters::getDataFrameTransformStats, options, GetDataFrameTransformStatsResponse::fromXContent, @@ -178,13 +178,13 @@ } /** - * Delete a data frame transform + * Delete a transform *

* For additional info - * see - * Delete data frame transform documentation + * see + * Delete transform documentation * - * @param request The delete data frame transform request + * @param request The delete transform request * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @return An AcknowledgedResponse object indicating request success * @throws IOException when there is a serialization issue sending the request or receiving the response @@ -199,19 +199,19 @@ public final class DataFrameClient { } /** - * Delete a data frame transform asynchronously and notifies listener on completion + * Delete a transform asynchronously and notifies listener on completion *

* For additional info - * see - * Delete data frame transform documentation - * - * @param request The delete data frame transform request + * see + * Delete transform documentation + * @param request The delete transform request * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener Listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void deleteDataFrameTransformAsync(DeleteDataFrameTransformRequest request, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, + public Cancellable deleteDataFrameTransformAsync(DeleteDataFrameTransformRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, DataFrameRequestConverters::deleteDataFrameTransform, options, AcknowledgedResponse::fromXContent, @@ -220,13 +220,13 @@ public final class DataFrameClient { } /** - * Preview the result of a data frame transform + * Preview the result of a transform *

* For additional info - * see - * Preview data frame transform documentation + * see + * Preview transform documentation * - * @param request The preview data frame transform request + * @param request The preview transform request * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @return A response containing the results of the applied transform * @throws IOException when there is a serialization issue sending the request or receiving the response @@ -241,18 +241,18 @@ public final class DataFrameClient { } /** - * Preview the result of a data frame transform asynchronously and notifies listener on completion + * Preview the result of a transform asynchronously and notifies listener on completion *

- * see - * Preview data frame transform documentation - * - * @param request The preview data frame transform request + * see + * Preview transform documentation + * @param request The preview transform request * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener Listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void previewDataFrameTransformAsync(PreviewDataFrameTransformRequest request, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, + public Cancellable previewDataFrameTransformAsync(PreviewDataFrameTransformRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, DataFrameRequestConverters::previewDataFrameTransform, options, PreviewDataFrameTransformResponse::fromXContent, @@ -261,13 +261,13 @@ public final class DataFrameClient { } /** - * Start a data frame transform + * Start a transform *

* For additional info - * see - * Start data frame transform documentation + * see + * Start transform documentation * - * @param request The start data frame transform request + * @param request The start transform request * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @return A response object indicating request success * @throws IOException when there is a serialization issue sending the request or receiving the response @@ -282,19 +282,19 @@ public final class DataFrameClient { } /** - * Start a data frame transform asynchronously and notifies listener on completion + * Start a transform asynchronously and notifies listener on completion *

* For additional info - * see - * Start data frame transform documentation - * - * @param request The start data frame transform request + * see + * Start transform documentation + * @param request The start transform request * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener Listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void startDataFrameTransformAsync(StartDataFrameTransformRequest request, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, + public Cancellable startDataFrameTransformAsync(StartDataFrameTransformRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, DataFrameRequestConverters::startDataFrameTransform, options, StartDataFrameTransformResponse::fromXContent, @@ -303,13 +303,13 @@ public final class DataFrameClient { } /** - * Stop a data frame transform + * Stop a transform *

* For additional info - * see - * Stop data frame transform documentation + * see + * Stop transform documentation * - * @param request The stop data frame transform request + * @param request The stop transform request * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @return A response object indicating request success * @throws IOException when there is a serialization issue sending the request or receiving the response @@ -324,19 +324,19 @@ public final class DataFrameClient { } /** - * Stop a data frame transform asynchronously and notifies listener on completion + * Stop a transform asynchronously and notifies listener on completion *

* For additional info - * see - * Stop data frame transform documentation - * - * @param request The stop data frame transform request + * see + * Stop transform documentation + * @param request The stop transform request * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener Listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void stopDataFrameTransformAsync(StopDataFrameTransformRequest request, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, + public Cancellable stopDataFrameTransformAsync(StopDataFrameTransformRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, DataFrameRequestConverters::stopDataFrameTransform, options, StopDataFrameTransformResponse::fromXContent, @@ -345,13 +345,13 @@ public final class DataFrameClient { } /** - * Get one or more data frame transform configurations + * Get one or more transform configurations *

* For additional info - * see - * Get data frame transform documentation + * see + * Get transform documentation * - * @param request The get data frame transform request + * @param request The get transform request * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @return A GetDataFrameTransformResponse containing the requested transforms * @throws IOException when there is a serialization issue sending the request or receiving the response @@ -366,19 +366,19 @@ public final class DataFrameClient { } /** - * Get one or more data frame transform configurations asynchronously and notifies listener on completion + * Get one or more transform configurations asynchronously and notifies listener on completion *

* For additional info - * see - * Get data frame transform documentation - * - * @param request The get data frame transform request + * see + * Get data transform documentation + * @param request The get transform request * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener Listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void getDataFrameTransformAsync(GetDataFrameTransformRequest request, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, + public Cancellable getDataFrameTransformAsync(GetDataFrameTransformRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, DataFrameRequestConverters::getDataFrameTransform, options, GetDataFrameTransformResponse::fromXContent, diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/DataFrameRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/DataFrameRequestConverters.java index 375c0a7c3af..ab8c79a1784 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/DataFrameRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/DataFrameRequestConverters.java @@ -24,23 +24,23 @@ import org.apache.http.client.methods.HttpGet; import org.apache.http.client.methods.HttpPost; import org.apache.http.client.methods.HttpPut; import org.elasticsearch.client.core.PageParams; -import org.elasticsearch.client.dataframe.DeleteDataFrameTransformRequest; -import org.elasticsearch.client.dataframe.GetDataFrameTransformRequest; -import org.elasticsearch.client.dataframe.GetDataFrameTransformStatsRequest; -import org.elasticsearch.client.dataframe.PreviewDataFrameTransformRequest; -import org.elasticsearch.client.dataframe.PutDataFrameTransformRequest; -import org.elasticsearch.client.dataframe.StartDataFrameTransformRequest; -import org.elasticsearch.client.dataframe.StopDataFrameTransformRequest; -import org.elasticsearch.client.dataframe.UpdateDataFrameTransformRequest; +import org.elasticsearch.client.transform.DeleteDataFrameTransformRequest; +import org.elasticsearch.client.transform.GetDataFrameTransformRequest; +import org.elasticsearch.client.transform.GetDataFrameTransformStatsRequest; +import org.elasticsearch.client.transform.PreviewDataFrameTransformRequest; +import org.elasticsearch.client.transform.PutDataFrameTransformRequest; +import org.elasticsearch.client.transform.StartDataFrameTransformRequest; +import org.elasticsearch.client.transform.StopDataFrameTransformRequest; +import org.elasticsearch.client.transform.UpdateDataFrameTransformRequest; import org.elasticsearch.common.Strings; import java.io.IOException; import static org.elasticsearch.client.RequestConverters.REQUEST_BODY_CONTENT_TYPE; import static org.elasticsearch.client.RequestConverters.createEntity; -import static org.elasticsearch.client.dataframe.DeleteDataFrameTransformRequest.FORCE; -import static org.elasticsearch.client.dataframe.GetDataFrameTransformRequest.ALLOW_NO_MATCH; -import static org.elasticsearch.client.dataframe.PutDataFrameTransformRequest.DEFER_VALIDATION; +import static org.elasticsearch.client.transform.DeleteDataFrameTransformRequest.FORCE; +import static org.elasticsearch.client.transform.GetDataFrameTransformRequest.ALLOW_NO_MATCH; +import 
static org.elasticsearch.client.transform.PutDataFrameTransformRequest.DEFER_VALIDATION; final class DataFrameRequestConverters { diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/GraphClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/GraphClient.java index 70912b094d0..44b3b8cf4d0 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/GraphClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/GraphClient.java @@ -34,7 +34,7 @@ public class GraphClient { GraphClient(RestHighLevelClient restHighLevelClient) { this.restHighLevelClient = restHighLevelClient; } - + /** * Executes an exploration request using the Graph API. * @@ -52,12 +52,13 @@ public class GraphClient { * * See Graph API * on elastic.co. + * @return cancellable that may be used to cancel the request */ - public final void exploreAsync(GraphExploreRequest graphExploreRequest, - RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(graphExploreRequest, GraphRequestConverters::explore, + public final Cancellable exploreAsync(GraphExploreRequest graphExploreRequest, + RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(graphExploreRequest, GraphRequestConverters::explore, options, GraphExploreResponse::fromXContent, listener, emptySet()); - } - + } + } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndexLifecycleClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndexLifecycleClient.java index 0450ae6a9d4..595a055f276 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndexLifecycleClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndexLifecycleClient.java @@ -39,6 +39,8 @@ import org.elasticsearch.client.slm.ExecuteSnapshotLifecyclePolicyRequest; import org.elasticsearch.client.slm.ExecuteSnapshotLifecyclePolicyResponse; import org.elasticsearch.client.slm.GetSnapshotLifecyclePolicyRequest; import org.elasticsearch.client.slm.GetSnapshotLifecyclePolicyResponse; +import org.elasticsearch.client.slm.GetSnapshotLifecycleStatsRequest; +import org.elasticsearch.client.slm.GetSnapshotLifecycleStatsResponse; import org.elasticsearch.client.slm.PutSnapshotLifecyclePolicyRequest; import java.io.IOException; @@ -74,10 +76,11 @@ public class IndexLifecycleClient { * @param request the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void getLifecyclePolicyAsync(GetLifecyclePolicyRequest request, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, IndexLifecycleRequestConverters::getLifecyclePolicy, options, + public Cancellable getLifecyclePolicyAsync(GetLifecyclePolicyRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, IndexLifecycleRequestConverters::getLifecyclePolicy, options, GetLifecyclePolicyResponse::fromXContent, listener, emptySet()); } @@ -103,10 +106,11 @@ public class IndexLifecycleClient { * @param request the request * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void putLifecyclePolicyAsync(PutLifecyclePolicyRequest request, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, IndexLifecycleRequestConverters::putLifecyclePolicy, options, + public Cancellable putLifecyclePolicyAsync(PutLifecyclePolicyRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, IndexLifecycleRequestConverters::putLifecyclePolicy, options, AcknowledgedResponse::fromXContent, listener, emptySet()); } @@ -132,10 +136,12 @@ public class IndexLifecycleClient { * @param request the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void deleteLifecyclePolicyAsync(DeleteLifecyclePolicyRequest request, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, IndexLifecycleRequestConverters::deleteLifecyclePolicy, options, + public Cancellable deleteLifecyclePolicyAsync(DeleteLifecyclePolicyRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity( + request, IndexLifecycleRequestConverters::deleteLifecyclePolicy, options, AcknowledgedResponse::fromXContent, listener, emptySet()); } @@ -161,10 +167,12 @@ public class IndexLifecycleClient { * @param request the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void removeIndexLifecyclePolicyAsync(RemoveIndexLifecyclePolicyRequest request, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, IndexLifecycleRequestConverters::removeIndexLifecyclePolicy, options, + public Cancellable removeIndexLifecyclePolicyAsync(RemoveIndexLifecyclePolicyRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity( + request, IndexLifecycleRequestConverters::removeIndexLifecyclePolicy, options, RemoveIndexLifecyclePolicyResponse::fromXContent, listener, emptySet()); } @@ -189,9 +197,10 @@ public class IndexLifecycleClient { * @param request the request * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void startILMAsync(StartILMRequest request, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, IndexLifecycleRequestConverters::startILM, options, + public Cancellable startILMAsync(StartILMRequest request, RequestOptions options, ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, IndexLifecycleRequestConverters::startILM, options, AcknowledgedResponse::fromXContent, listener, emptySet()); } @@ -227,14 +236,15 @@ public class IndexLifecycleClient { * Asynchronously get the status of index lifecycle management * See * the docs for more. - * * @param request the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void lifecycleManagementStatusAsync(LifecycleManagementStatusRequest request, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, IndexLifecycleRequestConverters::lifecycleManagementStatus, options, + public Cancellable lifecycleManagementStatusAsync(LifecycleManagementStatusRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity( + request, IndexLifecycleRequestConverters::lifecycleManagementStatus, options, LifecycleManagementStatusResponse::fromXContent, listener, emptySet()); } @@ -245,9 +255,10 @@ public class IndexLifecycleClient { * @param request the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void stopILMAsync(StopILMRequest request, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, IndexLifecycleRequestConverters::stopILM, options, + public Cancellable stopILMAsync(StopILMRequest request, RequestOptions options, ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, IndexLifecycleRequestConverters::stopILM, options, AcknowledgedResponse::fromXContent, listener, emptySet()); } @@ -272,10 +283,11 @@ public class IndexLifecycleClient { * @param request the request * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void explainLifecycleAsync(ExplainLifecycleRequest request, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, IndexLifecycleRequestConverters::explainLifecycle, options, + public Cancellable explainLifecycleAsync(ExplainLifecycleRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, IndexLifecycleRequestConverters::explainLifecycle, options, ExplainLifecycleResponse::fromXContent, listener, emptySet()); } @@ -300,10 +312,11 @@ public class IndexLifecycleClient { * @param request the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void retryLifecyclePolicyAsync(RetryLifecyclePolicyRequest request, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, IndexLifecycleRequestConverters::retryLifecycle, options, + public Cancellable retryLifecyclePolicyAsync(RetryLifecyclePolicyRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, IndexLifecycleRequestConverters::retryLifecycle, options, AcknowledgedResponse::fromXContent, listener, emptySet()); } @@ -335,10 +348,11 @@ public class IndexLifecycleClient { * @param request the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void getSnapshotLifecyclePolicyAsync(GetSnapshotLifecyclePolicyRequest request, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, IndexLifecycleRequestConverters::getSnapshotLifecyclePolicy, + public Cancellable getSnapshotLifecyclePolicyAsync(GetSnapshotLifecyclePolicyRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, IndexLifecycleRequestConverters::getSnapshotLifecyclePolicy, options, GetSnapshotLifecyclePolicyResponse::fromXContent, listener, emptySet()); } @@ -370,10 +384,11 @@ public class IndexLifecycleClient { * @param request the request * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void putSnapshotLifecyclePolicyAsync(PutSnapshotLifecyclePolicyRequest request, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, IndexLifecycleRequestConverters::putSnapshotLifecyclePolicy, + public Cancellable putSnapshotLifecyclePolicyAsync(PutSnapshotLifecyclePolicyRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, IndexLifecycleRequestConverters::putSnapshotLifecyclePolicy, options, AcknowledgedResponse::fromXContent, listener, emptySet()); } @@ -405,10 +420,12 @@ public class IndexLifecycleClient { * @param request the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void deleteSnapshotLifecyclePolicyAsync(DeleteSnapshotLifecyclePolicyRequest request, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, IndexLifecycleRequestConverters::deleteSnapshotLifecyclePolicy, + public Cancellable deleteSnapshotLifecyclePolicyAsync(DeleteSnapshotLifecyclePolicyRequest request, + RequestOptions options,ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity( + request, IndexLifecycleRequestConverters::deleteSnapshotLifecyclePolicy, options, AcknowledgedResponse::fromXContent, listener, emptySet()); } @@ -440,10 +457,48 @@ public class IndexLifecycleClient { * @param request the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void executeSnapshotLifecyclePolicyAsync(ExecuteSnapshotLifecyclePolicyRequest request, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, IndexLifecycleRequestConverters::executeSnapshotLifecyclePolicy, + public Cancellable executeSnapshotLifecyclePolicyAsync( + ExecuteSnapshotLifecyclePolicyRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity( + request, IndexLifecycleRequestConverters::executeSnapshotLifecyclePolicy, options, ExecuteSnapshotLifecyclePolicyResponse::fromXContent, listener, emptySet()); } + + /** + * Retrieve snapshot lifecycle statistics. + * See
+     *  https://www.elastic.co/guide/en/elasticsearch/client/java-rest/current/
+     *  java-rest-high-ilm-slm-get-snapshot-lifecycle-stats.html
+     * 
+ * for more. + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public GetSnapshotLifecycleStatsResponse getSnapshotLifecycleStats(GetSnapshotLifecycleStatsRequest request, + RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, IndexLifecycleRequestConverters::getSnapshotLifecycleStats, + options, GetSnapshotLifecycleStatsResponse::fromXContent, emptySet()); + } + + /** + * Asynchronously retrieve snapshot lifecycle statistics. + * See
+     *  https://www.elastic.co/guide/en/elasticsearch/client/java-rest/current/
+     *  java-rest-high-ilm-slm-get-snapshot-lifecycle-stats.html
+     * 
+ * for more. + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public Cancellable getSnapshotLifecycleStatsAsync(GetSnapshotLifecycleStatsRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, IndexLifecycleRequestConverters::getSnapshotLifecycleStats, + options, GetSnapshotLifecycleStatsResponse::fromXContent, listener, emptySet()); + } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndexLifecycleRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndexLifecycleRequestConverters.java index bcf98ce6339..fb5db72cbc9 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndexLifecycleRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndexLifecycleRequestConverters.java @@ -35,6 +35,7 @@ import org.elasticsearch.client.indexlifecycle.StopILMRequest; import org.elasticsearch.client.slm.DeleteSnapshotLifecyclePolicyRequest; import org.elasticsearch.client.slm.ExecuteSnapshotLifecyclePolicyRequest; import org.elasticsearch.client.slm.GetSnapshotLifecyclePolicyRequest; +import org.elasticsearch.client.slm.GetSnapshotLifecycleStatsRequest; import org.elasticsearch.client.slm.PutSnapshotLifecyclePolicyRequest; import org.elasticsearch.common.Strings; @@ -215,4 +216,14 @@ final class IndexLifecycleRequestConverters { request.addParameters(params.asMap()); return request; } + + static Request getSnapshotLifecycleStats(GetSnapshotLifecycleStatsRequest getSnapshotLifecycleStatsRequest) { + String endpoint = new RequestConverters.EndpointBuilder().addPathPartAsIs("_slm/stats").build(); + Request request = new Request(HttpGet.METHOD_NAME, endpoint); + RequestConverters.Params params = new RequestConverters.Params(); + params.withMasterTimeout(getSnapshotLifecycleStatsRequest.masterNodeTimeout()); + params.withTimeout(getSnapshotLifecycleStatsRequest.timeout()); + request.addParameters(params.asMap()); + return request; + } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java index 9394495313d..5003fa4525d 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java @@ -108,9 +108,12 @@ public final class IndicesClient { * @param deleteIndexRequest the request * @param options the request options (e.g. 
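The only genuinely new API in this file is the snapshot lifecycle stats call, which the converter below maps to `GET _slm/stats`. A sketch of both variants, assuming `GetSnapshotLifecycleStatsRequest` has a no-argument constructor (it only carries the timeouts the converter reads); the responses are simply printed since their accessors are not shown in this hunk, and the cluster address is a placeholder.

```java
import org.apache.http.HttpHost;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.client.Cancellable;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.slm.GetSnapshotLifecycleStatsRequest;
import org.elasticsearch.client.slm.GetSnapshotLifecycleStatsResponse;

public class SlmStatsExample {
    public static void main(String[] args) throws Exception {
        try (RestHighLevelClient client = new RestHighLevelClient(
                RestClient.builder(new HttpHost("localhost", 9200, "http")))) {

            GetSnapshotLifecycleStatsRequest request = new GetSnapshotLifecycleStatsRequest();

            // Blocking variant: issues GET _slm/stats and parses the body.
            GetSnapshotLifecycleStatsResponse stats =
                client.indexLifecycle().getSnapshotLifecycleStats(request, RequestOptions.DEFAULT);
            System.out.println("SLM stats: " + stats);

            // Async variant: the returned Cancellable lets the caller abort the request.
            Cancellable cancellable = client.indexLifecycle().getSnapshotLifecycleStatsAsync(
                request, RequestOptions.DEFAULT,
                ActionListener.wrap(
                    response -> System.out.println("SLM stats (async): " + response),
                    Throwable::printStackTrace));
            // cancellable.cancel(); // e.g. on shutdown
        }
    }
}
```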
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void deleteAsync(DeleteIndexRequest deleteIndexRequest, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(deleteIndexRequest, IndicesRequestConverters::deleteIndex, options, + public Cancellable deleteAsync(DeleteIndexRequest deleteIndexRequest, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(deleteIndexRequest, + IndicesRequestConverters::deleteIndex, options, AcknowledgedResponse::fromXContent, listener, emptySet()); } @@ -136,11 +139,12 @@ public final class IndicesClient { * @param createIndexRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void createAsync(CreateIndexRequest createIndexRequest, - RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(createIndexRequest, IndicesRequestConverters::createIndex, options, + public Cancellable createAsync(CreateIndexRequest createIndexRequest, + RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(createIndexRequest, IndicesRequestConverters::createIndex, options, CreateIndexResponse::fromXContent, listener, emptySet()); } @@ -177,12 +181,13 @@ public final class IndicesClient { * @deprecated This method uses an old request object which still refers to types, a deprecated feature. The * method {@link #createAsync(CreateIndexRequest, RequestOptions, ActionListener)} should be used instead, * which accepts a new request object. + * @return cancellable that may be used to cancel the request */ @Deprecated - public void createAsync(org.elasticsearch.action.admin.indices.create.CreateIndexRequest createIndexRequest, - RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(createIndexRequest, + public Cancellable createAsync(org.elasticsearch.action.admin.indices.create.CreateIndexRequest createIndexRequest, + RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(createIndexRequest, IndicesRequestConverters::createIndex, options, org.elasticsearch.action.admin.indices.create.CreateIndexResponse::fromXContent, listener, emptySet()); } @@ -208,10 +213,11 @@ public final class IndicesClient { * @param putMappingRequest the request * @param options the request options (e.g. 
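The deprecation notes above point callers at the typeless `org.elasticsearch.client.indices.CreateIndexRequest`. An illustrative sketch of the replacement call, which now also returns a `Cancellable`; the index name and settings are arbitrary.

```java
import org.apache.http.HttpHost;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.client.Cancellable;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.indices.CreateIndexRequest;
import org.elasticsearch.client.indices.CreateIndexResponse;
import org.elasticsearch.common.settings.Settings;

public class CreateIndexExample {
    public static void main(String[] args) throws Exception {
        try (RestHighLevelClient client = new RestHighLevelClient(
                RestClient.builder(new HttpHost("localhost", 9200, "http")))) {

            // New, typeless request object from org.elasticsearch.client.indices.
            CreateIndexRequest request = new CreateIndexRequest("logs-000001")
                .settings(Settings.builder()
                    .put("index.number_of_shards", 1)
                    .put("index.number_of_replicas", 0));

            Cancellable cancellable = client.indices().createAsync(request, RequestOptions.DEFAULT,
                new ActionListener<CreateIndexResponse>() {
                    @Override
                    public void onResponse(CreateIndexResponse response) {
                        System.out.println("created " + response.index()
                            + ", acknowledged=" + response.isAcknowledged());
                    }

                    @Override
                    public void onFailure(Exception e) {
                        e.printStackTrace();
                    }
                });
            // cancellable.cancel() would abort the HTTP request if it is still in flight.
        }
    }
}
```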
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void putMappingAsync(PutMappingRequest putMappingRequest, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(putMappingRequest, IndicesRequestConverters::putMapping, options, + public Cancellable putMappingAsync(PutMappingRequest putMappingRequest, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(putMappingRequest, IndicesRequestConverters::putMapping, options, AcknowledgedResponse::fromXContent, listener, emptySet()); } @@ -245,12 +251,13 @@ public final class IndicesClient { * @deprecated This method uses an old request object which still refers to types, a deprecated feature. The * method {@link #putMappingAsync(PutMappingRequest, RequestOptions, ActionListener)} should be used instead, * which accepts a new request object. + * @return cancellable that may be used to cancel the request */ @Deprecated - public void putMappingAsync(org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest putMappingRequest, - RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(putMappingRequest, IndicesRequestConverters::putMapping, options, + public Cancellable putMappingAsync(org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest putMappingRequest, + RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(putMappingRequest, IndicesRequestConverters::putMapping, options, AcknowledgedResponse::fromXContent, listener, emptySet()); } @@ -278,10 +285,11 @@ public final class IndicesClient { * @param getMappingsRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void getMappingAsync(GetMappingsRequest getMappingsRequest, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(getMappingsRequest, + public Cancellable getMappingAsync(GetMappingsRequest getMappingsRequest, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(getMappingsRequest, IndicesRequestConverters::getMappings, options, GetMappingsResponse::fromXContent, @@ -324,12 +332,13 @@ public final class IndicesClient { * @deprecated This method uses old request and response objects which still refer to types, a deprecated feature. * The method {@link #getMapping(GetMappingsRequest, RequestOptions)} should be used instead, which accepts a new * request object. 
+ * @return cancellable that may be used to cancel the request */ @Deprecated - public void getMappingAsync(org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest getMappingsRequest, - RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(getMappingsRequest, + public Cancellable getMappingAsync(org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest getMappingsRequest, + RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(getMappingsRequest, IndicesRequestConverters::getMappings, options, org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse::fromXContent, @@ -369,13 +378,17 @@ public final class IndicesClient { * @deprecated This method uses old request and response objects which still refer to types, a deprecated feature. * The method {@link #getFieldMappingAsync(GetFieldMappingsRequest, RequestOptions, ActionListener)} should be * used instead, which accepts a new request object. + * @return cancellable that may be used to cancel the request */ @Deprecated - public void getFieldMappingAsync(org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsRequest getFieldMappingsRequest, - RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(getFieldMappingsRequest, IndicesRequestConverters::getFieldMapping, options, - org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsResponse::fromXContent, listener, emptySet()); + public Cancellable getFieldMappingAsync( + org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsRequest getFieldMappingsRequest, + RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(getFieldMappingsRequest, + IndicesRequestConverters::getFieldMapping, options, + org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsResponse::fromXContent, + listener, emptySet()); } /** @@ -401,10 +414,12 @@ public final class IndicesClient { * @param getFieldMappingsRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void getFieldMappingAsync(GetFieldMappingsRequest getFieldMappingsRequest, - RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(getFieldMappingsRequest, IndicesRequestConverters::getFieldMapping, options, + public Cancellable getFieldMappingAsync(GetFieldMappingsRequest getFieldMappingsRequest, + RequestOptions options, ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity( + getFieldMappingsRequest, IndicesRequestConverters::getFieldMapping, options, GetFieldMappingsResponse::fromXContent, listener, emptySet()); } @@ -429,10 +444,12 @@ public final class IndicesClient { * @param indicesAliasesRequest the request * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void updateAliasesAsync(IndicesAliasesRequest indicesAliasesRequest, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(indicesAliasesRequest, IndicesRequestConverters::updateAliases, options, + public Cancellable updateAliasesAsync(IndicesAliasesRequest indicesAliasesRequest, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(indicesAliasesRequest, + IndicesRequestConverters::updateAliases, options, AcknowledgedResponse::fromXContent, listener, emptySet()); } @@ -457,9 +474,10 @@ public final class IndicesClient { * @param openIndexRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void openAsync(OpenIndexRequest openIndexRequest, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(openIndexRequest, IndicesRequestConverters::openIndex, options, + public Cancellable openAsync(OpenIndexRequest openIndexRequest, RequestOptions options, ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(openIndexRequest, IndicesRequestConverters::openIndex, options, OpenIndexResponse::fromXContent, listener, emptySet()); } @@ -484,9 +502,12 @@ public final class IndicesClient { * @param closeIndexRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void closeAsync(CloseIndexRequest closeIndexRequest, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(closeIndexRequest, IndicesRequestConverters::closeIndex, options, + public Cancellable closeAsync(CloseIndexRequest closeIndexRequest, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(closeIndexRequest, + IndicesRequestConverters::closeIndex, options, CloseIndexResponse::fromXContent, listener, emptySet()); } @@ -512,9 +533,10 @@ public final class IndicesClient { * @param getAliasesRequest the request * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void existsAliasAsync(GetAliasesRequest getAliasesRequest, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsync(getAliasesRequest, IndicesRequestConverters::existsAlias, options, + public Cancellable existsAliasAsync(GetAliasesRequest getAliasesRequest, RequestOptions options, ActionListener listener) { + return restHighLevelClient.performRequestAsync(getAliasesRequest, IndicesRequestConverters::existsAlias, options, RestHighLevelClient::convertExistsResponse, listener, emptySet()); } @@ -537,9 +559,10 @@ public final class IndicesClient { * @param refreshRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void refreshAsync(RefreshRequest refreshRequest, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(refreshRequest, IndicesRequestConverters::refresh, options, + public Cancellable refreshAsync(RefreshRequest refreshRequest, RequestOptions options, ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(refreshRequest, IndicesRequestConverters::refresh, options, RefreshResponse::fromXContent, listener, emptySet()); } @@ -562,15 +585,16 @@ public final class IndicesClient { * @param flushRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void flushAsync(FlushRequest flushRequest, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(flushRequest, IndicesRequestConverters::flush, options, + public Cancellable flushAsync(FlushRequest flushRequest, RequestOptions options, ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(flushRequest, IndicesRequestConverters::flush, options, FlushResponse::fromXContent, listener, emptySet()); } /** * Initiate a synced flush manually using the synced flush API. - * See + * See * Synced flush API on elastic.co * @param syncedFlushRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized @@ -584,15 +608,16 @@ public final class IndicesClient { /** * Asynchronously initiate a synced flush manually using the synced flush API. - * See + * See * Synced flush API on elastic.co * @param syncedFlushRequest the request * @param options the request options (e.g. 
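The refresh and flush methods follow the same convention. A small sketch, not taken from this diff, using the server-side `RefreshRequest` that `IndicesClient` accepts; the index name is assumed.

```java
import org.apache.http.HttpHost;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
import org.elasticsearch.client.Cancellable;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestHighLevelClient;

public class RefreshExample {
    public static void main(String[] args) throws Exception {
        try (RestHighLevelClient client = new RestHighLevelClient(
                RestClient.builder(new HttpHost("localhost", 9200, "http")))) {

            Cancellable cancellable = client.indices().refreshAsync(
                new RefreshRequest("logs-000001"), RequestOptions.DEFAULT,
                new ActionListener<RefreshResponse>() {
                    @Override
                    public void onResponse(RefreshResponse response) {
                        System.out.println("refreshed shards: " + response.getSuccessfulShards()
                            + "/" + response.getTotalShards());
                    }

                    @Override
                    public void onFailure(Exception e) {
                        e.printStackTrace();
                    }
                });
            // cancellable.cancel(); // abort if the refresh result is no longer needed
        }
    }
}
```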
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void flushSyncedAsync(SyncedFlushRequest syncedFlushRequest, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(syncedFlushRequest, IndicesRequestConverters::flushSynced, options, + public Cancellable flushSyncedAsync(SyncedFlushRequest syncedFlushRequest, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(syncedFlushRequest, IndicesRequestConverters::flushSynced, options, SyncedFlushResponse::fromXContent, listener, emptySet()); } @@ -617,10 +642,11 @@ public final class IndicesClient { * @param getSettingsRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void getSettingsAsync(GetSettingsRequest getSettingsRequest, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(getSettingsRequest, IndicesRequestConverters::getSettings, options, + public Cancellable getSettingsAsync(GetSettingsRequest getSettingsRequest, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(getSettingsRequest, IndicesRequestConverters::getSettings, options, GetSettingsResponse::fromXContent, listener, emptySet()); } @@ -645,10 +671,11 @@ public final class IndicesClient { * @param getIndexRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void getAsync(GetIndexRequest getIndexRequest, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(getIndexRequest, IndicesRequestConverters::getIndex, options, + public Cancellable getAsync(GetIndexRequest getIndexRequest, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(getIndexRequest, IndicesRequestConverters::getIndex, options, GetIndexResponse::fromXContent, listener, emptySet()); } @@ -679,11 +706,12 @@ public final class IndicesClient { * @param listener the listener to be notified upon request completion * @deprecated This method uses an old request object which still refers to types, a deprecated feature. The method * {@link #getAsync(GetIndexRequest, RequestOptions, ActionListener)} should be used instead, which accepts a new request object. 
+ * @return cancellable that may be used to cancel the request */ @Deprecated - public void getAsync(org.elasticsearch.action.admin.indices.get.GetIndexRequest getIndexRequest, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(getIndexRequest, IndicesRequestConverters::getIndex, options, + public Cancellable getAsync(org.elasticsearch.action.admin.indices.get.GetIndexRequest getIndexRequest, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(getIndexRequest, IndicesRequestConverters::getIndex, options, org.elasticsearch.action.admin.indices.get.GetIndexResponse::fromXContent, listener, emptySet()); } @@ -724,10 +752,12 @@ public final class IndicesClient { * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion * @deprecated use {@link #forcemergeAsync(ForceMergeRequest, RequestOptions, ActionListener)} instead + * @return cancellable that may be used to cancel the request */ @Deprecated - public void forceMergeAsync(ForceMergeRequest forceMergeRequest, RequestOptions options, ActionListener listener) { - forcemergeAsync(forceMergeRequest, options, listener); + public Cancellable forceMergeAsync(ForceMergeRequest forceMergeRequest, RequestOptions options, + ActionListener listener) { + return forcemergeAsync(forceMergeRequest, options, listener); } /** @@ -737,9 +767,12 @@ public final class IndicesClient { * @param forceMergeRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void forcemergeAsync(ForceMergeRequest forceMergeRequest, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(forceMergeRequest, IndicesRequestConverters::forceMerge, options, + public Cancellable forcemergeAsync(ForceMergeRequest forceMergeRequest, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(forceMergeRequest, + IndicesRequestConverters::forceMerge, options, ForceMergeResponse::fromXContent, listener, emptySet()); } @@ -765,10 +798,12 @@ public final class IndicesClient { * @param clearIndicesCacheRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void clearCacheAsync(ClearIndicesCacheRequest clearIndicesCacheRequest, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(clearIndicesCacheRequest, IndicesRequestConverters::clearCache, options, + public Cancellable clearCacheAsync(ClearIndicesCacheRequest clearIndicesCacheRequest, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(clearIndicesCacheRequest, + IndicesRequestConverters::clearCache, options, ClearIndicesCacheResponse::fromXContent, listener, emptySet()); } @@ -798,9 +833,10 @@ public final class IndicesClient { * @param request the request * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void existsAsync(GetIndexRequest request, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsync( + public Cancellable existsAsync(GetIndexRequest request, RequestOptions options, ActionListener listener) { + return restHighLevelClient.performRequestAsync( request, IndicesRequestConverters::indicesExist, options, @@ -841,11 +877,12 @@ public final class IndicesClient { * @param listener the listener to be notified upon request completion * @deprecated This method uses an old request object which still refers to types, a deprecated feature. The method * {@link #existsAsync(GetIndexRequest, RequestOptions, ActionListener)} should be used instead, which accepts a new request object. + * @return cancellable that may be used to cancel the request */ @Deprecated - public void existsAsync(org.elasticsearch.action.admin.indices.get.GetIndexRequest request, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsync( + public Cancellable existsAsync(org.elasticsearch.action.admin.indices.get.GetIndexRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsync( request, IndicesRequestConverters::indicesExist, options, @@ -876,9 +913,10 @@ public final class IndicesClient { * @param resizeRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void shrinkAsync(ResizeRequest resizeRequest, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(resizeRequest, IndicesRequestConverters::shrink, options, + public Cancellable shrinkAsync(ResizeRequest resizeRequest, RequestOptions options, ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(resizeRequest, IndicesRequestConverters::shrink, options, ResizeResponse::fromXContent, listener, emptySet()); } @@ -903,9 +941,10 @@ public final class IndicesClient { * @param resizeRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void splitAsync(ResizeRequest resizeRequest, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(resizeRequest, IndicesRequestConverters::split, options, + public Cancellable splitAsync(ResizeRequest resizeRequest, RequestOptions options, ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(resizeRequest, IndicesRequestConverters::split, options, ResizeResponse::fromXContent, listener, emptySet()); } @@ -930,9 +969,10 @@ public final class IndicesClient { * @param resizeRequest the request * @param options the request options (e.g. 
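As the hunk shows, `existsAsync` resolves the response to a plain `Boolean` through `RestHighLevelClient::convertExistsResponse` rather than parsing a body. A brief sketch with an assumed index name:

```java
import org.apache.http.HttpHost;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.client.Cancellable;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.indices.GetIndexRequest;

public class IndexExistsExample {
    public static void main(String[] args) throws Exception {
        try (RestHighLevelClient client = new RestHighLevelClient(
                RestClient.builder(new HttpHost("localhost", 9200, "http")))) {

            GetIndexRequest request = new GetIndexRequest("logs-000001");

            // The listener receives a Boolean; there is no response body to parse.
            Cancellable cancellable = client.indices().existsAsync(request, RequestOptions.DEFAULT,
                ActionListener.wrap(
                    exists -> System.out.println("index exists: " + exists),
                    Throwable::printStackTrace));
            // Keeping the Cancellable around lets the caller give up early, e.g. on a UI timeout:
            // cancellable.cancel();
        }
    }
}
```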
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void cloneAsync(ResizeRequest resizeRequest, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(resizeRequest, IndicesRequestConverters::clone, options, + public Cancellable cloneAsync(ResizeRequest resizeRequest, RequestOptions options, ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(resizeRequest, IndicesRequestConverters::clone, options, ResizeResponse::fromXContent, listener, emptySet()); } @@ -957,9 +997,10 @@ public final class IndicesClient { * @param rolloverRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void rolloverAsync(RolloverRequest rolloverRequest, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(rolloverRequest, IndicesRequestConverters::rollover, options, + public Cancellable rolloverAsync(RolloverRequest rolloverRequest, RequestOptions options, ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(rolloverRequest, IndicesRequestConverters::rollover, options, RolloverResponse::fromXContent, listener, emptySet()); } @@ -995,11 +1036,13 @@ public final class IndicesClient { * @deprecated This method uses deprecated request and response objects. * The method {@link #rolloverAsync(RolloverRequest, RequestOptions, ActionListener)} should be used instead, which * accepts a new request object. + * @return cancellable that may be used to cancel the request */ @Deprecated - public void rolloverAsync(org.elasticsearch.action.admin.indices.rollover.RolloverRequest rolloverRequest, - RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(rolloverRequest, IndicesRequestConverters::rollover, options, + public Cancellable rolloverAsync(org.elasticsearch.action.admin.indices.rollover.RolloverRequest rolloverRequest, + RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(rolloverRequest, IndicesRequestConverters::rollover, options, org.elasticsearch.action.admin.indices.rollover.RolloverResponse::fromXContent, listener, emptySet()); } @@ -1024,10 +1067,13 @@ public final class IndicesClient { * @param getAliasesRequest the request * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void getAliasAsync(GetAliasesRequest getAliasesRequest, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(getAliasesRequest, IndicesRequestConverters::getAlias, options, - GetAliasesResponse::fromXContent, listener, singleton(RestStatus.NOT_FOUND.getStatus())); + public Cancellable getAliasAsync(GetAliasesRequest getAliasesRequest, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(getAliasesRequest, + IndicesRequestConverters::getAlias, options, + GetAliasesResponse::fromXContent, listener, singleton(RestStatus.NOT_FOUND.getStatus())); } /** @@ -1051,10 +1097,12 @@ public final class IndicesClient { * @param updateSettingsRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void putSettingsAsync(UpdateSettingsRequest updateSettingsRequest, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(updateSettingsRequest, IndicesRequestConverters::indexPutSettings, options, + public Cancellable putSettingsAsync(UpdateSettingsRequest updateSettingsRequest, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(updateSettingsRequest, + IndicesRequestConverters::indexPutSettings, options, AcknowledgedResponse::fromXContent, listener, emptySet()); } @@ -1088,11 +1136,14 @@ public final class IndicesClient { * @deprecated This old form of request allows types in mappings. * Use {@link #putTemplateAsync(PutIndexTemplateRequest, RequestOptions, ActionListener)} * instead which introduces a new request object without types. + * @return cancellable that may be used to cancel the request */ @Deprecated - public void putTemplateAsync(org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest putIndexTemplateRequest, - RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(putIndexTemplateRequest, IndicesRequestConverters::putTemplate, options, + public Cancellable putTemplateAsync( + org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest putIndexTemplateRequest, + RequestOptions options, ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(putIndexTemplateRequest, + IndicesRequestConverters::putTemplate, options, AcknowledgedResponse::fromXContent, listener, emptySet()); } @@ -1120,10 +1171,12 @@ public final class IndicesClient { * @param putIndexTemplateRequest the request * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void putTemplateAsync(PutIndexTemplateRequest putIndexTemplateRequest, - RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(putIndexTemplateRequest, IndicesRequestConverters::putTemplate, options, + public Cancellable putTemplateAsync(PutIndexTemplateRequest putIndexTemplateRequest, + RequestOptions options, ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(putIndexTemplateRequest, + IndicesRequestConverters::putTemplate, options, AcknowledgedResponse::fromXContent, listener, emptySet()); } @@ -1137,8 +1190,10 @@ public final class IndicesClient { * @return the response * @throws IOException in case there is a problem sending the request or parsing back the response */ - public ValidateQueryResponse validateQuery(ValidateQueryRequest validateQueryRequest, RequestOptions options) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(validateQueryRequest, IndicesRequestConverters::validateQuery, options, + public ValidateQueryResponse validateQuery(ValidateQueryRequest validateQueryRequest, + RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(validateQueryRequest, + IndicesRequestConverters::validateQuery, options, ValidateQueryResponse::fromXContent, emptySet()); } @@ -1150,10 +1205,12 @@ public final class IndicesClient { * @param validateQueryRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void validateQueryAsync(ValidateQueryRequest validateQueryRequest, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(validateQueryRequest, IndicesRequestConverters::validateQuery, options, + public Cancellable validateQueryAsync(ValidateQueryRequest validateQueryRequest, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(validateQueryRequest, + IndicesRequestConverters::validateQuery, options, ValidateQueryResponse::fromXContent, listener, emptySet()); } @@ -1174,7 +1231,8 @@ public final class IndicesClient { GetIndexTemplatesRequest getIndexTemplatesRequest, RequestOptions options) throws IOException { return restHighLevelClient.performRequestAndParseEntity(getIndexTemplatesRequest, IndicesRequestConverters::getTemplatesWithDocumentTypes, - options, org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse::fromXContent, emptySet()); + options, + org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse::fromXContent, emptySet()); } /** @@ -1186,8 +1244,8 @@ public final class IndicesClient { * @return the response * @throws IOException in case there is a problem sending the request or parsing back the response */ - public GetIndexTemplatesResponse getIndexTemplate(GetIndexTemplatesRequest getIndexTemplatesRequest, RequestOptions options) - throws IOException { + public GetIndexTemplatesResponse getIndexTemplate(GetIndexTemplatesRequest getIndexTemplatesRequest, + RequestOptions options) throws IOException { return 
restHighLevelClient.performRequestAndParseEntity(getIndexTemplatesRequest, IndicesRequestConverters::getTemplates, options, GetIndexTemplatesResponse::fromXContent, emptySet()); @@ -1203,13 +1261,16 @@ public final class IndicesClient { * @param listener the listener to be notified upon request completion * @deprecated This method uses an old response object which still refers to types, a deprecated feature. Use * {@link #getIndexTemplateAsync(GetIndexTemplatesRequest, RequestOptions, ActionListener)} instead which returns a new response object + * @return cancellable that may be used to cancel the request */ @Deprecated - public void getTemplateAsync(GetIndexTemplatesRequest getIndexTemplatesRequest, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(getIndexTemplatesRequest, + public Cancellable getTemplateAsync(GetIndexTemplatesRequest getIndexTemplatesRequest, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(getIndexTemplatesRequest, IndicesRequestConverters::getTemplatesWithDocumentTypes, - options, org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse::fromXContent, listener, emptySet()); + options, + org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse::fromXContent, + listener, emptySet()); } /** @@ -1219,10 +1280,11 @@ public final class IndicesClient { * @param getIndexTemplatesRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void getIndexTemplateAsync(GetIndexTemplatesRequest getIndexTemplatesRequest, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(getIndexTemplatesRequest, + public Cancellable getIndexTemplateAsync(GetIndexTemplatesRequest getIndexTemplatesRequest, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(getIndexTemplatesRequest, IndicesRequestConverters::getTemplates, options, GetIndexTemplatesResponse::fromXContent, listener, emptySet()); } @@ -1235,24 +1297,26 @@ public final class IndicesClient { * @return true if any index templates in the request exist, false otherwise * @throws IOException in case there is a problem sending the request or parsing back the response */ - public boolean existsTemplate(IndexTemplatesExistRequest indexTemplatesRequest, RequestOptions options) throws IOException { - return restHighLevelClient.performRequest(indexTemplatesRequest, IndicesRequestConverters::templatesExist, options, + public boolean existsTemplate(IndexTemplatesExistRequest indexTemplatesRequest, + RequestOptions options) throws IOException { + return restHighLevelClient.performRequest(indexTemplatesRequest, + IndicesRequestConverters::templatesExist, options, RestHighLevelClient::convertExistsResponse, emptySet()); } /** * Uses the Index Templates API to determine if index templates exist - * * @param indexTemplatesExistRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion. 
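A sketch for the template APIs touched here; the template name is invented, and the `getIndexTemplates()` accessor on the typeless response is an assumption.

```java
import org.apache.http.HttpHost;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.client.Cancellable;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.indices.GetIndexTemplatesRequest;
import org.elasticsearch.client.indices.IndexTemplatesExistRequest;

public class TemplateExample {
    public static void main(String[] args) throws Exception {
        try (RestHighLevelClient client = new RestHighLevelClient(
                RestClient.builder(new HttpHost("localhost", 9200, "http")))) {

            // Blocking existence check: resolves the HTTP status to a boolean.
            boolean exists = client.indices().existsTemplate(
                new IndexTemplatesExistRequest("logs-template"), RequestOptions.DEFAULT);
            System.out.println("template exists: " + exists);

            // Typeless async fetch of the template definition; the Cancellable can abort it.
            Cancellable cancellable = client.indices().getIndexTemplateAsync(
                new GetIndexTemplatesRequest("logs-template"), RequestOptions.DEFAULT,
                ActionListener.wrap(
                    response -> System.out.println("templates found: " + response.getIndexTemplates().size()),
                    Throwable::printStackTrace));
        }
    }
}
```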
The listener will be called with the value {@code true} - * if any index templates in the request exist, false otherwise + * @return cancellable that may be used to cancel the request */ - public void existsTemplateAsync(IndexTemplatesExistRequest indexTemplatesExistRequest, - RequestOptions options, - ActionListener listener) { + public Cancellable existsTemplateAsync(IndexTemplatesExistRequest indexTemplatesExistRequest, + RequestOptions options, + ActionListener listener) { - restHighLevelClient.performRequestAsync(indexTemplatesExistRequest, IndicesRequestConverters::templatesExist, options, + return restHighLevelClient.performRequestAsync(indexTemplatesExistRequest, + IndicesRequestConverters::templatesExist, options, RestHighLevelClient::convertExistsResponse, listener, emptySet()); } @@ -1273,14 +1337,14 @@ public final class IndicesClient { * Asynchronously calls the analyze API * * See Analyze API on elastic.co - * * @param request the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void analyzeAsync(AnalyzeRequest request, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, IndicesRequestConverters::analyze, options, + public Cancellable analyzeAsync(AnalyzeRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, IndicesRequestConverters::analyze, options, AnalyzeResponse::fromXContent, listener, emptySet()); } @@ -1297,13 +1361,14 @@ public final class IndicesClient { /** * Asynchronously calls the _freeze API - * * @param request the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void freezeAsync(FreezeIndexRequest request, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, IndicesRequestConverters::freezeIndex, options, + public Cancellable freezeAsync(FreezeIndexRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, IndicesRequestConverters::freezeIndex, options, ShardsAcknowledgedResponse::fromXContent, listener, emptySet()); } @@ -1320,13 +1385,15 @@ public final class IndicesClient { /** * Asynchronously calls the _unfreeze API - * * @param request the request * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void unfreezeAsync(UnfreezeIndexRequest request, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, IndicesRequestConverters::unfreezeIndex, options, + public Cancellable unfreezeAsync(UnfreezeIndexRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, + IndicesRequestConverters::unfreezeIndex, options, ShardsAcknowledgedResponse::fromXContent, listener, emptySet()); } @@ -1348,14 +1415,14 @@ public final class IndicesClient { * Asynchronously delete an index template using the Index Templates API * See Index Templates API * on elastic.co - * * @param request the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void deleteTemplateAsync(DeleteIndexTemplateRequest request, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, IndicesRequestConverters::deleteTemplate, + public Cancellable deleteTemplateAsync(DeleteIndexTemplateRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, IndicesRequestConverters::deleteTemplate, options, AcknowledgedResponse::fromXContent, listener, emptySet()); } @@ -1372,14 +1439,14 @@ public final class IndicesClient { /** * Asynchronously calls the _reload_search_analyzers API - * * @param request the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void reloadAnalyzersAsync(ReloadAnalyzersRequest request, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, IndicesRequestConverters::reloadAnalyzers, options, + public Cancellable reloadAnalyzersAsync(ReloadAnalyzersRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, IndicesRequestConverters::reloadAnalyzers, options, ReloadAnalyzersResponse::fromXContent, listener, emptySet()); } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IngestClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IngestClient.java index 035d1fd26fb..cfe481bd69b 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/IngestClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IngestClient.java @@ -67,9 +67,10 @@ public final class IngestClient { * @param request the request * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void putPipelineAsync(PutPipelineRequest request, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity( request, IngestRequestConverters::putPipeline, options, + public Cancellable putPipelineAsync(PutPipelineRequest request, RequestOptions options, ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity( request, IngestRequestConverters::putPipeline, options, AcknowledgedResponse::fromXContent, listener, emptySet()); } @@ -94,9 +95,10 @@ public final class IngestClient { * @param request the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void getPipelineAsync(GetPipelineRequest request, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity( request, IngestRequestConverters::getPipeline, options, + public Cancellable getPipelineAsync(GetPipelineRequest request, RequestOptions options, ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity( request, IngestRequestConverters::getPipeline, options, GetPipelineResponse::fromXContent, listener, Collections.singleton(404)); } @@ -123,9 +125,12 @@ public final class IngestClient { * @param request the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void deletePipelineAsync(DeletePipelineRequest request, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity( request, IngestRequestConverters::deletePipeline, options, + public Cancellable deletePipelineAsync(DeletePipelineRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity( request, + IngestRequestConverters::deletePipeline, options, AcknowledgedResponse::fromXContent, listener, emptySet()); } @@ -154,11 +159,12 @@ public final class IngestClient { * @param request the request * @param options the request options (e.g. 
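For the ingest client, a sketch of `putPipelineAsync` with a made-up pipeline id and a single rename processor; the three-argument `PutPipelineRequest(id, source, XContentType)` constructor is the one the ingest documentation uses.

```java
import org.apache.http.HttpHost;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ingest.PutPipelineRequest;
import org.elasticsearch.client.Cancellable;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.xcontent.XContentType;

public class PutPipelineExample {
    public static void main(String[] args) throws Exception {
        try (RestHighLevelClient client = new RestHighLevelClient(
                RestClient.builder(new HttpHost("localhost", 9200, "http")))) {

            String source =
                "{\"description\":\"rename a field\"," +
                "\"processors\":[{\"rename\":{\"field\":\"src\",\"target_field\":\"dest\"}}]}";
            PutPipelineRequest request = new PutPipelineRequest(
                "my-pipeline", new BytesArray(source), XContentType.JSON);

            Cancellable cancellable = client.ingest().putPipelineAsync(request, RequestOptions.DEFAULT,
                ActionListener.wrap(
                    response -> System.out.println("pipeline stored: " + response.isAcknowledged()),
                    Throwable::printStackTrace));
            // cancellable.cancel() aborts the request if the caller no longer needs the result.
        }
    }
}
```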
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void simulateAsync(SimulatePipelineRequest request, - RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity( request, IngestRequestConverters::simulatePipeline, options, + public Cancellable simulateAsync(SimulatePipelineRequest request, + RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity( request, IngestRequestConverters::simulatePipeline, options, SimulatePipelineResponse::fromXContent, listener, emptySet()); } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/LicenseClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/LicenseClient.java index 0f74a003315..e0db84e6b2b 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/LicenseClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/LicenseClient.java @@ -80,9 +80,10 @@ public final class LicenseClient { * Asynchronously updates license for the cluster. * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void putLicenseAsync(PutLicenseRequest request, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, LicenseRequestConverters::putLicense, options, + public Cancellable putLicenseAsync(PutLicenseRequest request, RequestOptions options, ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, LicenseRequestConverters::putLicense, options, PutLicenseResponse::fromXContent, listener, emptySet()); } @@ -101,9 +102,10 @@ public final class LicenseClient { * Asynchronously returns the current license for the cluster cluster. * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void getLicenseAsync(GetLicenseRequest request, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsync(request, LicenseRequestConverters::getLicense, options, + public Cancellable getLicenseAsync(GetLicenseRequest request, RequestOptions options, ActionListener listener) { + return restHighLevelClient.performRequestAsync(request, LicenseRequestConverters::getLicense, options, response -> new GetLicenseResponse(convertResponseToJson(response)), listener, emptySet()); } @@ -122,9 +124,12 @@ public final class LicenseClient { * Asynchronously deletes license from the cluster. * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void deleteLicenseAsync(DeleteLicenseRequest request, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, LicenseRequestConverters::deleteLicense, options, + public Cancellable deleteLicenseAsync(DeleteLicenseRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, + LicenseRequestConverters::deleteLicense, options, AcknowledgedResponse::fromXContent, listener, emptySet()); } @@ -143,12 +148,13 @@ public final class LicenseClient { * Asynchronously starts a trial license on the cluster. * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void startTrialAsync(StartTrialRequest request, - RequestOptions options, - ActionListener listener) { + public Cancellable startTrialAsync(StartTrialRequest request, + RequestOptions options, + ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, LicenseRequestConverters::startTrial, options, + return restHighLevelClient.performRequestAsyncAndParseEntity(request, LicenseRequestConverters::startTrial, options, StartTrialResponse::fromXContent, listener, singleton(403)); } @@ -167,10 +173,11 @@ public final class LicenseClient { * Asynchronously initiates an indefinite basic license. * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void startBasicAsync(StartBasicRequest request, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, LicenseRequestConverters::startBasic, options, + public Cancellable startBasicAsync(StartBasicRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, LicenseRequestConverters::startBasic, options, StartBasicResponse::fromXContent, listener, emptySet()); } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java index ef078cf52db..62619303685 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java @@ -153,13 +153,13 @@ public final class MachineLearningClient { *
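As a quick orientation to the pattern running through this change set: each *Async method in the high-level client now returns org.elasticsearch.client.Cancellable instead of void, giving callers a handle to abort the underlying HTTP request. A minimal sketch, not part of this diff, assuming an already-built RestHighLevelClient named client and a prepared PutPipelineRequest named request:

    // Hedged sketch: "client" and "request" are assumed to exist; only the new
    // Cancellable return type comes from the change above.
    Cancellable cancellable = client.ingest().putPipelineAsync(request, RequestOptions.DEFAULT,
        ActionListener.wrap(
            response -> { /* pipeline was acknowledged */ },
            e -> { /* failures, including cancellation, are reported here */ }));
    // ... later, if the result is no longer needed:
    cancellable.cancel();

Cancelling aborts the in-flight request on the low-level REST client, and the listener is then notified through onFailure rather than being left waiting.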

* For additional info * see ML PUT job documentation - * * @param request The request containing the {@link org.elasticsearch.client.ml.job.config.Job} settings * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener Listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void putJobAsync(PutJobRequest request, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, + public Cancellable putJobAsync(PutJobRequest request, RequestOptions options, ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, MLRequestConverters::putJob, options, PutJobResponse::fromXContent, @@ -192,13 +192,13 @@ public final class MachineLearningClient { *

* For additional info * see ML GET job documentation - * * @param request {@link GetJobRequest} Request containing a list of jobId(s) and additional options * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener Listener to be notified with {@link GetJobResponse} upon request completion + * @return cancellable that may be used to cancel the request */ - public void getJobAsync(GetJobRequest request, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, + public Cancellable getJobAsync(GetJobRequest request, RequestOptions options, ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, MLRequestConverters::getJob, options, GetJobResponse::fromXContent, @@ -231,13 +231,13 @@ public final class MachineLearningClient { *
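The MachineLearningClient methods follow the same convention, so the handle can be kept and used to abandon a call that is no longer needed. A hedged sketch, assuming client is an existing RestHighLevelClient and "my-job" is an illustrative job id:

    // Hedged sketch: "client" is assumed; the job id is illustrative only.
    GetJobRequest getJob = new GetJobRequest("my-job");
    Cancellable getJobCall = client.machineLearning().getJobAsync(getJob, RequestOptions.DEFAULT,
        ActionListener.wrap(
            getJobResponse -> { /* inspect the returned job configurations */ },
            e -> { /* failure or cancellation */ }));
    // Abort the call instead of waiting for it, e.g. when the caller shuts down:
    getJobCall.cancel();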

* For additional info * see Get job stats docs - * * @param request {@link GetJobStatsRequest} Request containing a list of jobId(s) and additional options * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener Listener to be notified with {@link GetJobStatsResponse} upon request completion + * @return cancellable that may be used to cancel the request */ - public void getJobStatsAsync(GetJobStatsRequest request, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, + public Cancellable getJobStatsAsync(GetJobStatsRequest request, RequestOptions options, ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, MLRequestConverters::getJobStats, options, GetJobStatsResponse::fromXContent, @@ -272,14 +272,14 @@ public final class MachineLearningClient { * For additional info * see ML Delete Expired Data * documentation - * * @param request The request to delete expired ML data * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener Listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void deleteExpiredDataAsync(DeleteExpiredDataRequest request, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, + public Cancellable deleteExpiredDataAsync(DeleteExpiredDataRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, MLRequestConverters::deleteExpiredData, options, DeleteExpiredDataResponse::fromXContent, @@ -313,12 +313,13 @@ public final class MachineLearningClient { * For additional info * see ML Delete Job documentation * - * @param request The request to delete the job + * @param request The request to delete the job * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener Listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void deleteJobAsync(DeleteJobRequest request, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, + public Cancellable deleteJobAsync(DeleteJobRequest request, RequestOptions options, ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, MLRequestConverters::deleteJob, options, DeleteJobResponse::fromXContent, @@ -360,9 +361,10 @@ public final class MachineLearningClient { * @param request Request containing job_id and additional optional options * @param options Additional request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener Listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void openJobAsync(OpenJobRequest request, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, + public Cancellable openJobAsync(OpenJobRequest request, RequestOptions options, ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, MLRequestConverters::openJob, options, OpenJobResponse::fromXContent, @@ -400,9 +402,10 @@ public final class MachineLearningClient { * @param request Request containing job_ids and additional options. See {@link CloseJobRequest} * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener Listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void closeJobAsync(CloseJobRequest request, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, + public Cancellable closeJobAsync(CloseJobRequest request, RequestOptions options, ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, MLRequestConverters::closeJob, options, CloseJobResponse::fromXContent, @@ -449,9 +452,10 @@ public final class MachineLearningClient { * @param request The {@link FlushJobRequest} object enclosing the `jobId` and additional request options * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener Listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void flushJobAsync(FlushJobRequest request, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, + public Cancellable flushJobAsync(FlushJobRequest request, RequestOptions options, ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, MLRequestConverters::flushJob, options, FlushJobResponse::fromXContent, @@ -489,9 +493,10 @@ public final class MachineLearningClient { * @param request ForecastJobRequest with forecasting options * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener Listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void forecastJobAsync(ForecastJobRequest request, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, + public Cancellable forecastJobAsync(ForecastJobRequest request, RequestOptions options, ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, MLRequestConverters::forecastJob, options, ForecastJobResponse::fromXContent, @@ -529,9 +534,11 @@ public final class MachineLearningClient { * @param request the {@link DeleteForecastRequest} object enclosing the desired jobId, forecastIDs, and other options * @param options Additional request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener Listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void deleteForecastAsync(DeleteForecastRequest request, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, + public Cancellable deleteForecastAsync(DeleteForecastRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, MLRequestConverters::deleteForecast, options, AcknowledgedResponse::fromXContent, @@ -569,10 +576,11 @@ public final class MachineLearningClient { * @param request The request to delete the model snapshot * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener Listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void deleteModelSnapshotAsync(DeleteModelSnapshotRequest request, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, + public Cancellable deleteModelSnapshotAsync(DeleteModelSnapshotRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, MLRequestConverters::deleteModelSnapshot, options, AcknowledgedResponse::fromXContent, @@ -610,10 +618,11 @@ public final class MachineLearningClient { * @param request The request to revert to a previous model snapshot * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener Listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void revertModelSnapshotAsync(RevertModelSnapshotRequest request, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, + public Cancellable revertModelSnapshotAsync(RevertModelSnapshotRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, MLRequestConverters::revertModelSnapshot, options, RevertModelSnapshotResponse::fromXContent, @@ -649,9 +658,10 @@ public final class MachineLearningClient { * @param request The request containing the {@link org.elasticsearch.client.ml.datafeed.DatafeedConfig} settings * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener Listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void putDatafeedAsync(PutDatafeedRequest request, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, + public Cancellable putDatafeedAsync(PutDatafeedRequest request, RequestOptions options, ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, MLRequestConverters::putDatafeed, options, PutDatafeedResponse::fromXContent, @@ -689,9 +699,11 @@ public final class MachineLearningClient { * @param request The request containing the {@link org.elasticsearch.client.ml.datafeed.DatafeedUpdate} settings * @param options Additional request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener Listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void updateDatafeedAsync(UpdateDatafeedRequest request, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, + public Cancellable updateDatafeedAsync(UpdateDatafeedRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, MLRequestConverters::updateDatafeed, options, PutDatafeedResponse::fromXContent, @@ -730,9 +742,11 @@ public final class MachineLearningClient { * @param request {@link GetDatafeedRequest} Request containing a list of datafeedId(s) and additional options * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener Listener to be notified with {@link GetDatafeedResponse} upon request completion + * @return cancellable that may be used to cancel the request */ - public void getDatafeedAsync(GetDatafeedRequest request, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, + public Cancellable getDatafeedAsync(GetDatafeedRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, MLRequestConverters::getDatafeed, options, GetDatafeedResponse::fromXContent, @@ -770,9 +784,11 @@ public final class MachineLearningClient { * @param request The request to delete the datafeed * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener Listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void deleteDatafeedAsync(DeleteDatafeedRequest request, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, + public Cancellable deleteDatafeedAsync(DeleteDatafeedRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, MLRequestConverters::deleteDatafeed, options, AcknowledgedResponse::fromXContent, @@ -810,9 +826,11 @@ public final class MachineLearningClient { * @param request The request to start the datafeed * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener Listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void startDatafeedAsync(StartDatafeedRequest request, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, + public Cancellable startDatafeedAsync(StartDatafeedRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, MLRequestConverters::startDatafeed, options, StartDatafeedResponse::fromXContent, @@ -850,9 +868,11 @@ public final class MachineLearningClient { * @param request The request to stop the datafeed * @param options Additional request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener Listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void stopDatafeedAsync(StopDatafeedRequest request, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, + public Cancellable stopDatafeedAsync(StopDatafeedRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, MLRequestConverters::stopDatafeed, options, StopDatafeedResponse::fromXContent, @@ -910,11 +930,12 @@ public final class MachineLearningClient { * @param request {@link GetDatafeedStatsRequest} Request containing a list of datafeedId(s) and additional options * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener Listener to be notified with {@link GetDatafeedStatsResponse} upon request completion + * @return cancellable that may be used to cancel the request */ - public void getDatafeedStatsAsync(GetDatafeedStatsRequest request, - RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, + public Cancellable getDatafeedStatsAsync(GetDatafeedStatsRequest request, + RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, MLRequestConverters::getDatafeedStats, options, GetDatafeedStatsResponse::fromXContent, @@ -932,11 +953,12 @@ public final class MachineLearningClient { * @param request The request to preview the datafeed * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener Listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void previewDatafeedAsync(PreviewDatafeedRequest request, - RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, + public Cancellable previewDatafeedAsync(PreviewDatafeedRequest request, + RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, MLRequestConverters::previewDatafeed, options, PreviewDatafeedResponse::fromXContent, @@ -972,9 +994,10 @@ public final class MachineLearningClient { * @param request the {@link UpdateJobRequest} object enclosing the desired updates * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener Listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void updateJobAsync(UpdateJobRequest request, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, + public Cancellable updateJobAsync(UpdateJobRequest request, RequestOptions options, ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, MLRequestConverters::updateJob, options, PutJobResponse::fromXContent, @@ -1005,12 +1028,13 @@ public final class MachineLearningClient { * For additional info * see ML GET buckets documentation * - * @param request The request + * @param request The request * @param options Additional request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener Listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void getBucketsAsync(GetBucketsRequest request, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, + public Cancellable getBucketsAsync(GetBucketsRequest request, RequestOptions options, ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, MLRequestConverters::getBuckets, options, GetBucketsResponse::fromXContent, @@ -1047,9 +1071,11 @@ public final class MachineLearningClient { * @param request The request * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener Listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void getCategoriesAsync(GetCategoriesRequest request, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, + public Cancellable getCategoriesAsync(GetCategoriesRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, MLRequestConverters::getCategories, options, GetCategoriesResponse::fromXContent, @@ -1086,10 +1112,11 @@ public final class MachineLearningClient { * @param request The request * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener Listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void getModelSnapshotsAsync(GetModelSnapshotsRequest request, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, + public Cancellable getModelSnapshotsAsync(GetModelSnapshotsRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, MLRequestConverters::getModelSnapshots, options, GetModelSnapshotsResponse::fromXContent, @@ -1127,10 +1154,11 @@ public final class MachineLearningClient { * @param request The request * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener Listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void updateModelSnapshotAsync(UpdateModelSnapshotRequest request, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, + public Cancellable updateModelSnapshotAsync(UpdateModelSnapshotRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, MLRequestConverters::updateModelSnapshot, options, UpdateModelSnapshotResponse::fromXContent, @@ -1166,10 +1194,11 @@ public final class MachineLearningClient { * @param request The request * @param options Additional request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener Listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void getOverallBucketsAsync(GetOverallBucketsRequest request, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, + public Cancellable getOverallBucketsAsync(GetOverallBucketsRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, MLRequestConverters::getOverallBuckets, options, GetOverallBucketsResponse::fromXContent, @@ -1203,9 +1232,10 @@ public final class MachineLearningClient { * @param request the request * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener Listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void getRecordsAsync(GetRecordsRequest request, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, + public Cancellable getRecordsAsync(GetRecordsRequest request, RequestOptions options, ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, MLRequestConverters::getRecords, options, GetRecordsResponse::fromXContent, @@ -1245,9 +1275,10 @@ public final class MachineLearningClient { * @param request PostDataRequest containing the data to post and some additional options * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener Listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void postDataAsync(PostDataRequest request, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, + public Cancellable postDataAsync(PostDataRequest request, RequestOptions options, ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, MLRequestConverters::postData, options, PostDataResponse::fromXContent, @@ -1283,9 +1314,11 @@ public final class MachineLearningClient { * @param request The calendars request * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener Listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void getCalendarsAsync(GetCalendarsRequest request, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, + public Cancellable getCalendarsAsync(GetCalendarsRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, MLRequestConverters::getCalendars, options, GetCalendarsResponse::fromXContent, @@ -1321,10 +1354,11 @@ public final class MachineLearningClient { * @param request the request * @param options Additional request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener Listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void getInfluencersAsync(GetInfluencersRequest request, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, + public Cancellable getInfluencersAsync(GetInfluencersRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, MLRequestConverters::getInfluencers, options, GetInfluencersResponse::fromXContent, @@ -1362,9 +1396,10 @@ public final class MachineLearningClient { * @param request The request * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener Listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void putCalendarAsync(PutCalendarRequest request, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, + public Cancellable putCalendarAsync(PutCalendarRequest request, RequestOptions options, ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, MLRequestConverters::putCalendar, options, PutCalendarResponse::fromXContent, @@ -1402,9 +1437,11 @@ public final class MachineLearningClient { * @param request The request * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener Listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void putCalendarJobAsync(PutCalendarJobRequest request, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, + public Cancellable putCalendarJobAsync(PutCalendarJobRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, MLRequestConverters::putCalendarJob, options, PutCalendarResponse::fromXContent, @@ -1442,11 +1479,12 @@ public final class MachineLearningClient { * @param request The request * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener Listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void deleteCalendarJobAsync(DeleteCalendarJobRequest request, - RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, + public Cancellable deleteCalendarJobAsync(DeleteCalendarJobRequest request, + RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, MLRequestConverters::deleteCalendarJob, options, PutCalendarResponse::fromXContent, @@ -1484,9 +1522,11 @@ public final class MachineLearningClient { * @param request The request to delete the calendar * @param options Additional request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener Listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void deleteCalendarAsync(DeleteCalendarRequest request, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, + public Cancellable deleteCalendarAsync(DeleteCalendarRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, MLRequestConverters::deleteCalendar, options, AcknowledgedResponse::fromXContent, @@ -1524,10 +1564,11 @@ public final class MachineLearningClient { * @param request The request * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener Listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void getCalendarEventsAsync(GetCalendarEventsRequest request, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, + public Cancellable getCalendarEventsAsync(GetCalendarEventsRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, MLRequestConverters::getCalendarEvents, options, GetCalendarEventsResponse::fromXContent, @@ -1565,10 +1606,11 @@ public final class MachineLearningClient { * @param request The request * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener Listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void postCalendarEventAsync(PostCalendarEventRequest request, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, + public Cancellable postCalendarEventAsync(PostCalendarEventRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, MLRequestConverters::postCalendarEvents, options, PostCalendarEventResponse::fromXContent, @@ -1606,11 +1648,12 @@ public final class MachineLearningClient { * @param request The request * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener Listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void deleteCalendarEventAsync(DeleteCalendarEventRequest request, - RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, + public Cancellable deleteCalendarEventAsync(DeleteCalendarEventRequest request, + RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, MLRequestConverters::deleteCalendarEvent, options, AcknowledgedResponse::fromXContent, @@ -1646,9 +1689,10 @@ public final class MachineLearningClient { * @param request The request containing the {@link org.elasticsearch.client.ml.job.config.MlFilter} settings * @param options Additional request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener Listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void putFilterAsync(PutFilterRequest request, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, + public Cancellable putFilterAsync(PutFilterRequest request, RequestOptions options, ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, MLRequestConverters::putFilter, options, PutFilterResponse::fromXContent, @@ -1684,9 +1728,10 @@ public final class MachineLearningClient { * @param request The request * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener Listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void getFilterAsync(GetFiltersRequest request, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, + public Cancellable getFilterAsync(GetFiltersRequest request, RequestOptions options, ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, MLRequestConverters::getFilter, options, GetFiltersResponse::fromXContent, @@ -1724,9 +1769,10 @@ public final class MachineLearningClient { * @param request The request * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener Listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void updateFilterAsync(UpdateFilterRequest request, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, + public Cancellable updateFilterAsync(UpdateFilterRequest request, RequestOptions options, ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, MLRequestConverters::updateFilter, options, PutFilterResponse::fromXContent, @@ -1764,9 +1810,11 @@ public final class MachineLearningClient { * @param request The request to delete the filter * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener Listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void deleteFilterAsync(DeleteFilterRequest request, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, + public Cancellable deleteFilterAsync(DeleteFilterRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, MLRequestConverters::deleteFilter, options, AcknowledgedResponse::fromXContent, @@ -1802,9 +1850,10 @@ public final class MachineLearningClient { * @param request The request of Machine Learning info * @param options Additional request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener Listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void getMlInfoAsync(MlInfoRequest request, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, + public Cancellable getMlInfoAsync(MlInfoRequest request, RequestOptions options, ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, MLRequestConverters::mlInfo, options, MlInfoResponse::fromXContent, @@ -1842,10 +1891,11 @@ public final class MachineLearningClient { * @param request The find file structure request * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener Listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void findFileStructureAsync(FindFileStructureRequest request, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, + public Cancellable findFileStructureAsync(FindFileStructureRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, MLRequestConverters::findFileStructure, options, FindFileStructureResponse::fromXContent, @@ -1881,9 +1931,11 @@ public final class MachineLearningClient { * @param request The request of Machine Learning info * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener Listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void setUpgradeModeAsync(SetUpgradeModeRequest request, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, + public Cancellable setUpgradeModeAsync(SetUpgradeModeRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, MLRequestConverters::setUpgradeMode, options, AcknowledgedResponse::fromXContent, @@ -1925,10 +1977,11 @@ public final class MachineLearningClient { * {@link org.elasticsearch.client.ml.dataframe.DataFrameAnalyticsConfig} * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener Listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void putDataFrameAnalyticsAsync(PutDataFrameAnalyticsRequest request, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, + public Cancellable putDataFrameAnalyticsAsync(PutDataFrameAnalyticsRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, MLRequestConverters::putDataFrameAnalytics, options, PutDataFrameAnalyticsResponse::fromXContent, @@ -1967,10 +2020,11 @@ public final class MachineLearningClient { * @param request The {@link GetDataFrameAnalyticsRequest} * @param options Additional request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener Listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void getDataFrameAnalyticsAsync(GetDataFrameAnalyticsRequest request, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, + public Cancellable getDataFrameAnalyticsAsync(GetDataFrameAnalyticsRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, MLRequestConverters::getDataFrameAnalytics, options, GetDataFrameAnalyticsResponse::fromXContent, @@ -2008,10 +2062,11 @@ public final class MachineLearningClient { * @param request The {@link GetDataFrameAnalyticsStatsRequest} * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener Listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void getDataFrameAnalyticsStatsAsync(GetDataFrameAnalyticsStatsRequest request, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, + public Cancellable getDataFrameAnalyticsStatsAsync(GetDataFrameAnalyticsStatsRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, MLRequestConverters::getDataFrameAnalyticsStats, options, GetDataFrameAnalyticsStatsResponse::fromXContent, @@ -2047,13 +2102,14 @@ public final class MachineLearningClient { * see * Start Data Frame Analytics documentation * - * @param request The {@link StartDataFrameAnalyticsRequest} + * @param request The {@link StartDataFrameAnalyticsRequest} * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener Listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void startDataFrameAnalyticsAsync(StartDataFrameAnalyticsRequest request, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, + public Cancellable startDataFrameAnalyticsAsync(StartDataFrameAnalyticsRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, MLRequestConverters::startDataFrameAnalytics, options, AcknowledgedResponse::fromXContent, @@ -2092,10 +2148,11 @@ public final class MachineLearningClient { * @param request The {@link StopDataFrameAnalyticsRequest} * @param options Additional request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener Listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void stopDataFrameAnalyticsAsync(StopDataFrameAnalyticsRequest request, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, + public Cancellable stopDataFrameAnalyticsAsync(StopDataFrameAnalyticsRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, MLRequestConverters::stopDataFrameAnalytics, options, StopDataFrameAnalyticsResponse::fromXContent, @@ -2134,10 +2191,11 @@ public final class MachineLearningClient { * @param request The {@link DeleteDataFrameAnalyticsRequest} * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener Listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void deleteDataFrameAnalyticsAsync(DeleteDataFrameAnalyticsRequest request, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, + public Cancellable deleteDataFrameAnalyticsAsync(DeleteDataFrameAnalyticsRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, MLRequestConverters::deleteDataFrameAnalytics, options, AcknowledgedResponse::fromXContent, @@ -2176,10 +2234,11 @@ public final class MachineLearningClient { * @param request The {@link EvaluateDataFrameRequest} * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener Listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void evaluateDataFrameAsync(EvaluateDataFrameRequest request, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, + public Cancellable evaluateDataFrameAsync(EvaluateDataFrameRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, MLRequestConverters::evaluateDataFrame, options, EvaluateDataFrameResponse::fromXContent, @@ -2219,10 +2278,11 @@ public final class MachineLearningClient { * @param request The {@link PutDataFrameAnalyticsRequest} * @param options Additional request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener Listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void estimateMemoryUsageAsync(PutDataFrameAnalyticsRequest request, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity( + public Cancellable estimateMemoryUsageAsync(PutDataFrameAnalyticsRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity( request, MLRequestConverters::estimateMemoryUsage, options, diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/MigrationClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/MigrationClient.java index a95115f71fa..32f7cb140e5 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/MigrationClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/MigrationClient.java @@ -58,10 +58,11 @@ public final class MigrationClient { * @param request the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void getDeprecationInfoAsync(DeprecationInfoRequest request, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, MigrationRequestConverters::getDeprecationInfo, options, + public Cancellable getDeprecationInfoAsync(DeprecationInfoRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, MigrationRequestConverters::getDeprecationInfo, options, DeprecationInfoResponse::fromXContent, listener, Collections.emptySet()); } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java index 165a829c396..2fbfeb21a2e 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java @@ -484,6 +484,7 @@ final class RequestConverters { if (multiSearchTemplateRequest.maxConcurrentSearchRequests() != MultiSearchRequest.MAX_CONCURRENT_SEARCH_REQUESTS_DEFAULT) { params.putParam("max_concurrent_searches", Integer.toString(multiSearchTemplateRequest.maxConcurrentSearchRequests())); } + request.addParameters(params.asMap()); XContent xContent = REQUEST_BODY_CONTENT_TYPE.xContent(); byte[] source = MultiSearchTemplateRequest.writeMultiLineFormat(multiSearchTemplateRequest, xContent); @@ -497,8 +498,14 @@ final class RequestConverters { params.withRouting(countRequest.routing()); params.withPreference(countRequest.preference()); params.withIndicesOptions(countRequest.indicesOptions()); + if (countRequest.terminateAfter() != 0){ + params.withTerminateAfter(countRequest.terminateAfter()); + } + if (countRequest.minScore() != null){ + params.putParam("min_score", String.valueOf(countRequest.minScore())); + } request.addParameters(params.asMap()); - request.setEntity(createEntity(countRequest.source(), REQUEST_BODY_CONTENT_TYPE)); + request.setEntity(createEntity(countRequest, REQUEST_BODY_CONTENT_TYPE)); return request; } @@ -907,6 +914,10 
@@ final class RequestConverters { return this; } + Params withTerminateAfter(int terminateAfter){ + return putParam("terminate_after", String.valueOf(terminateAfter)); + } + Params withTimeout(TimeValue timeout) { return putParam("timeout", timeout); } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java index 38a7214a00c..093d717389f 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java @@ -473,8 +473,8 @@ public class RestHighLevelClient implements Closeable { * are shipped with the Elastic Stack distribution of Elasticsearch. All of * these APIs will 404 if run against the OSS distribution of Elasticsearch. *
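The RequestConverters changes above also start forwarding terminate_after and min_score as request parameters when they are set on a CountRequest. A hedged sketch of the caller-side effect; the setter names on CountRequest are assumptions, since only the getters (terminateAfter(), minScore()) are visible in this hunk:

    // Hedged sketch: "client" is assumed; index name and values are illustrative.
    CountRequest countRequest = new CountRequest("my-index");
    countRequest.source(new SearchSourceBuilder().query(QueryBuilders.matchAllQuery()));
    countRequest.terminateAfter(1000);   // assumed setter; sent as the terminate_after parameter
    countRequest.minScore(0.5f);         // assumed setter; sent as the min_score parameter
    CountResponse countResponse = client.count(countRequest, RequestOptions.DEFAULT);
    long hits = countResponse.getCount();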

- * See the - * Data Frame APIs on elastic.co for more information. + * See the + * Transform APIs on elastic.co for more information. * * @return the client wrapper for making Data Frame API calls */ @@ -503,9 +503,11 @@ public class RestHighLevelClient implements Closeable { * @param bulkRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public final void bulkAsync(BulkRequest bulkRequest, RequestOptions options, ActionListener listener) { - performRequestAsyncAndParseEntity(bulkRequest, RequestConverters::bulk, options, BulkResponse::fromXContent, listener, emptySet()); + public final Cancellable bulkAsync(BulkRequest bulkRequest, RequestOptions options, ActionListener listener) { + return performRequestAsyncAndParseEntity(bulkRequest, RequestConverters::bulk, options, + BulkResponse::fromXContent, listener, emptySet()); } /** @@ -540,9 +542,11 @@ public class RestHighLevelClient implements Closeable { * @param reindexRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public final void reindexAsync(ReindexRequest reindexRequest, RequestOptions options, ActionListener listener) { - performRequestAsyncAndParseEntity( + public final Cancellable reindexAsync(ReindexRequest reindexRequest, RequestOptions options, + ActionListener listener) { + return performRequestAsyncAndParseEntity( reindexRequest, RequestConverters::reindex, options, BulkByScrollResponse::fromXContent, listener, singleton(409) ); } @@ -568,10 +572,11 @@ public class RestHighLevelClient implements Closeable { * @param updateByQueryRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public final void updateByQueryAsync(UpdateByQueryRequest updateByQueryRequest, RequestOptions options, - ActionListener listener) { - performRequestAsyncAndParseEntity( + public final Cancellable updateByQueryAsync(UpdateByQueryRequest updateByQueryRequest, RequestOptions options, + ActionListener listener) { + return performRequestAsyncAndParseEntity( updateByQueryRequest, RequestConverters::updateByQuery, options, BulkByScrollResponse::fromXContent, listener, singleton(409) ); } @@ -597,10 +602,11 @@ public class RestHighLevelClient implements Closeable { * @param deleteByQueryRequest the request * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public final void deleteByQueryAsync(DeleteByQueryRequest deleteByQueryRequest, RequestOptions options, - ActionListener listener) { - performRequestAsyncAndParseEntity( + public final Cancellable deleteByQueryAsync(DeleteByQueryRequest deleteByQueryRequest, RequestOptions options, + ActionListener listener) { + return performRequestAsyncAndParseEntity( deleteByQueryRequest, RequestConverters::deleteByQuery, options, BulkByScrollResponse::fromXContent, listener, singleton(409) ); } @@ -625,10 +631,11 @@ public class RestHighLevelClient implements Closeable { * @param rethrottleRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public final void deleteByQueryRethrottleAsync(RethrottleRequest rethrottleRequest, RequestOptions options, - ActionListener listener) { - performRequestAsyncAndParseEntity(rethrottleRequest, RequestConverters::rethrottleDeleteByQuery, options, + public final Cancellable deleteByQueryRethrottleAsync(RethrottleRequest rethrottleRequest, RequestOptions options, + ActionListener listener) { + return performRequestAsyncAndParseEntity(rethrottleRequest, RequestConverters::rethrottleDeleteByQuery, options, ListTasksResponse::fromXContent, listener, emptySet()); } @@ -652,10 +659,11 @@ public class RestHighLevelClient implements Closeable { * @param rethrottleRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public final void updateByQueryRethrottleAsync(RethrottleRequest rethrottleRequest, RequestOptions options, - ActionListener listener) { - performRequestAsyncAndParseEntity(rethrottleRequest, RequestConverters::rethrottleUpdateByQuery, options, + public final Cancellable updateByQueryRethrottleAsync(RethrottleRequest rethrottleRequest, RequestOptions options, + ActionListener listener) { + return performRequestAsyncAndParseEntity(rethrottleRequest, RequestConverters::rethrottleUpdateByQuery, options, ListTasksResponse::fromXContent, listener, emptySet()); } @@ -677,15 +685,15 @@ public class RestHighLevelClient implements Closeable { * Executes a reindex rethrottling request. * See the * Reindex rethrottling API on elastic.co - * * @param rethrottleRequest the request * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public final void reindexRethrottleAsync(RethrottleRequest rethrottleRequest, RequestOptions options, - ActionListener listener) { - performRequestAsyncAndParseEntity(rethrottleRequest, RequestConverters::rethrottleReindex, options, ListTasksResponse::fromXContent, - listener, emptySet()); + public final Cancellable reindexRethrottleAsync(RethrottleRequest rethrottleRequest, RequestOptions options, + ActionListener listener) { + return performRequestAsyncAndParseEntity(rethrottleRequest, + RequestConverters::rethrottleReindex, options, ListTasksResponse::fromXContent, listener, emptySet()); } /** @@ -725,9 +733,10 @@ public class RestHighLevelClient implements Closeable { * @param getRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public final void getAsync(GetRequest getRequest, RequestOptions options, ActionListener listener) { - performRequestAsyncAndParseEntity(getRequest, RequestConverters::get, options, GetResponse::fromXContent, listener, + public final Cancellable getAsync(GetRequest getRequest, RequestOptions options, ActionListener listener) { + return performRequestAsyncAndParseEntity(getRequest, RequestConverters::get, options, GetResponse::fromXContent, listener, singleton(404)); } @@ -764,10 +773,12 @@ public class RestHighLevelClient implements Closeable { * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion * @deprecated use {@link #mgetAsync(MultiGetRequest, RequestOptions, ActionListener)} instead + * @return cancellable that may be used to cancel the request */ @Deprecated - public final void multiGetAsync(MultiGetRequest multiGetRequest, RequestOptions options, ActionListener listener) { - mgetAsync(multiGetRequest, options, listener); + public final Cancellable multiGetAsync(MultiGetRequest multiGetRequest, RequestOptions options, + ActionListener listener) { + return mgetAsync(multiGetRequest, options, listener); } /** @@ -776,10 +787,12 @@ public class RestHighLevelClient implements Closeable { * @param multiGetRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public final void mgetAsync(MultiGetRequest multiGetRequest, RequestOptions options, ActionListener listener) { - performRequestAsyncAndParseEntity(multiGetRequest, RequestConverters::multiGet, options, MultiGetResponse::fromXContent, listener, - singleton(404)); + public final Cancellable mgetAsync(MultiGetRequest multiGetRequest, RequestOptions options, + ActionListener listener) { + return performRequestAsyncAndParseEntity(multiGetRequest, RequestConverters::multiGet, options, + MultiGetResponse::fromXContent, listener, singleton(404)); } /** @@ -799,9 +812,10 @@ public class RestHighLevelClient implements Closeable { * @param getRequest the request * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public final void existsAsync(GetRequest getRequest, RequestOptions options, ActionListener listener) { - performRequestAsync(getRequest, RequestConverters::exists, options, RestHighLevelClient::convertExistsResponse, listener, + public final Cancellable existsAsync(GetRequest getRequest, RequestOptions options, ActionListener listener) { + return performRequestAsync(getRequest, RequestConverters::exists, options, RestHighLevelClient::convertExistsResponse, listener, emptySet()); } @@ -824,10 +838,11 @@ public class RestHighLevelClient implements Closeable { * @param getRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public final void existsSourceAsync(GetRequest getRequest, RequestOptions options, ActionListener listener) { - performRequestAsync(getRequest, RequestConverters::sourceExists, options, RestHighLevelClient::convertExistsResponse, listener, - emptySet()); + public final Cancellable existsSourceAsync(GetRequest getRequest, RequestOptions options, ActionListener listener) { + return performRequestAsync(getRequest, RequestConverters::sourceExists, options, + RestHighLevelClient::convertExistsResponse, listener, emptySet()); } /** @@ -838,7 +853,8 @@ public class RestHighLevelClient implements Closeable { * @return the response */ public final IndexResponse index(IndexRequest indexRequest, RequestOptions options) throws IOException { - return performRequestAndParseEntity(indexRequest, RequestConverters::index, options, IndexResponse::fromXContent, emptySet()); + return performRequestAndParseEntity(indexRequest, RequestConverters::index, options, + IndexResponse::fromXContent, emptySet()); } /** @@ -847,9 +863,10 @@ public class RestHighLevelClient implements Closeable { * @param indexRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public final void indexAsync(IndexRequest indexRequest, RequestOptions options, ActionListener listener) { - performRequestAsyncAndParseEntity(indexRequest, RequestConverters::index, options, IndexResponse::fromXContent, listener, + public final Cancellable indexAsync(IndexRequest indexRequest, RequestOptions options, ActionListener listener) { + return performRequestAsyncAndParseEntity(indexRequest, RequestConverters::index, options, IndexResponse::fromXContent, listener, emptySet()); } @@ -871,9 +888,10 @@ public class RestHighLevelClient implements Closeable { * @param countRequest the request * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public final void countAsync(CountRequest countRequest, RequestOptions options, ActionListener listener) { - performRequestAsyncAndParseEntity(countRequest, RequestConverters::count, options,CountResponse::fromXContent, + public final Cancellable countAsync(CountRequest countRequest, RequestOptions options, ActionListener listener) { + return performRequestAsyncAndParseEntity(countRequest, RequestConverters::count, options,CountResponse::fromXContent, listener, emptySet()); } @@ -894,9 +912,10 @@ public class RestHighLevelClient implements Closeable { * @param updateRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public final void updateAsync(UpdateRequest updateRequest, RequestOptions options, ActionListener listener) { - performRequestAsyncAndParseEntity(updateRequest, RequestConverters::update, options, UpdateResponse::fromXContent, listener, + public final Cancellable updateAsync(UpdateRequest updateRequest, RequestOptions options, ActionListener listener) { + return performRequestAsyncAndParseEntity(updateRequest, RequestConverters::update, options, UpdateResponse::fromXContent, listener, emptySet()); } @@ -918,9 +937,10 @@ public class RestHighLevelClient implements Closeable { * @param deleteRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public final void deleteAsync(DeleteRequest deleteRequest, RequestOptions options, ActionListener listener) { - performRequestAsyncAndParseEntity(deleteRequest, RequestConverters::delete, options, DeleteResponse::fromXContent, listener, + public final Cancellable deleteAsync(DeleteRequest deleteRequest, RequestOptions options, ActionListener listener) { + return performRequestAsyncAndParseEntity(deleteRequest, RequestConverters::delete, options, DeleteResponse::fromXContent, listener, Collections.singleton(404)); } @@ -946,9 +966,10 @@ public class RestHighLevelClient implements Closeable { * @param searchRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public final void searchAsync(SearchRequest searchRequest, RequestOptions options, ActionListener listener) { - performRequestAsyncAndParseEntity( + public final Cancellable searchAsync(SearchRequest searchRequest, RequestOptions options, ActionListener listener) { + return performRequestAsyncAndParseEntity( searchRequest, r -> RequestConverters.search(r, "_search"), options, @@ -992,11 +1013,12 @@ public class RestHighLevelClient implements Closeable { * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion * @deprecated use {@link #msearchAsync(MultiSearchRequest, RequestOptions, ActionListener)} instead + * @return cancellable that may be used to cancel the request */ @Deprecated - public final void multiSearchAsync(MultiSearchRequest searchRequest, RequestOptions options, - ActionListener listener) { - msearchAsync(searchRequest, options, listener); + public final Cancellable multiSearchAsync(MultiSearchRequest searchRequest, RequestOptions options, + ActionListener listener) { + return msearchAsync(searchRequest, options, listener); } /** @@ -1006,10 +1028,11 @@ public class RestHighLevelClient implements Closeable { * @param searchRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public final void msearchAsync(MultiSearchRequest searchRequest, RequestOptions options, - ActionListener listener) { - performRequestAsyncAndParseEntity(searchRequest, RequestConverters::multiSearch, options, MultiSearchResponse::fromXContext, + public final Cancellable msearchAsync(MultiSearchRequest searchRequest, RequestOptions options, + ActionListener listener) { + return performRequestAsyncAndParseEntity(searchRequest, RequestConverters::multiSearch, options, MultiSearchResponse::fromXContext, listener, emptySet()); } @@ -1051,11 +1074,12 @@ public class RestHighLevelClient implements Closeable { * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion * @deprecated use {@link #scrollAsync(SearchScrollRequest, RequestOptions, ActionListener)} instead + * @return cancellable that may be used to cancel the request */ @Deprecated - public final void searchScrollAsync(SearchScrollRequest searchScrollRequest, RequestOptions options, - ActionListener listener) { - scrollAsync(searchScrollRequest, options, listener); + public final Cancellable searchScrollAsync(SearchScrollRequest searchScrollRequest, RequestOptions options, + ActionListener listener) { + return scrollAsync(searchScrollRequest, options, listener); } /** @@ -1066,11 +1090,12 @@ public class RestHighLevelClient implements Closeable { * @param searchScrollRequest the request * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public final void scrollAsync(SearchScrollRequest searchScrollRequest, RequestOptions options, - ActionListener listener) { - performRequestAsyncAndParseEntity(searchScrollRequest, RequestConverters::searchScroll, options, SearchResponse::fromXContent, - listener, emptySet()); + public final Cancellable scrollAsync(SearchScrollRequest searchScrollRequest, RequestOptions options, + ActionListener listener) { + return performRequestAsyncAndParseEntity(searchScrollRequest, RequestConverters::searchScroll, + options, SearchResponse::fromXContent, listener, emptySet()); } /** @@ -1095,11 +1120,12 @@ public class RestHighLevelClient implements Closeable { * @param clearScrollRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public final void clearScrollAsync(ClearScrollRequest clearScrollRequest, RequestOptions options, - ActionListener listener) { - performRequestAsyncAndParseEntity(clearScrollRequest, RequestConverters::clearScroll, options, ClearScrollResponse::fromXContent, - listener, emptySet()); + public final Cancellable clearScrollAsync(ClearScrollRequest clearScrollRequest, RequestOptions options, + ActionListener listener) { + return performRequestAsyncAndParseEntity(clearScrollRequest, RequestConverters::clearScroll, + options, ClearScrollResponse::fromXContent, listener, emptySet()); } /** @@ -1121,10 +1147,11 @@ public class RestHighLevelClient implements Closeable { * * See Search Template API * on elastic.co. + * @return cancellable that may be used to cancel the request */ - public final void searchTemplateAsync(SearchTemplateRequest searchTemplateRequest, RequestOptions options, - ActionListener listener) { - performRequestAsyncAndParseEntity(searchTemplateRequest, RequestConverters::searchTemplate, options, + public final Cancellable searchTemplateAsync(SearchTemplateRequest searchTemplateRequest, RequestOptions options, + ActionListener listener) { + return performRequestAsyncAndParseEntity(searchTemplateRequest, RequestConverters::searchTemplate, options, SearchTemplateResponse::fromXContent, listener, emptySet()); } @@ -1152,9 +1179,10 @@ public class RestHighLevelClient implements Closeable { * @param explainRequest the request * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public final void explainAsync(ExplainRequest explainRequest, RequestOptions options, ActionListener listener) { - performRequestAsync(explainRequest, RequestConverters::explain, options, + public final Cancellable explainAsync(ExplainRequest explainRequest, RequestOptions options, ActionListener listener) { + return performRequestAsync(explainRequest, RequestConverters::explain, options, response -> { CheckedFunction entityParser = parser -> ExplainResponse.fromXContent(parser, convertExistsResponse(response)); @@ -1186,9 +1214,12 @@ public class RestHighLevelClient implements Closeable { * @param request the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public final void termvectorsAsync(TermVectorsRequest request, RequestOptions options, ActionListener listener) { - performRequestAsyncAndParseEntity(request, RequestConverters::termVectors, options, TermVectorsResponse::fromXContent, listener, + public final Cancellable termvectorsAsync(TermVectorsRequest request, RequestOptions options, + ActionListener listener) { + return performRequestAsyncAndParseEntity(request, RequestConverters::termVectors, options, + TermVectorsResponse::fromXContent, listener, emptySet()); } @@ -1216,10 +1247,11 @@ public class RestHighLevelClient implements Closeable { * @param request the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public final void mtermvectorsAsync(MultiTermVectorsRequest request, RequestOptions options, - ActionListener listener) { - performRequestAsyncAndParseEntity( + public final Cancellable mtermvectorsAsync(MultiTermVectorsRequest request, RequestOptions options, + ActionListener listener) { + return performRequestAsyncAndParseEntity( request, RequestConverters::mtermVectors, options, MultiTermVectorsResponse::fromXContent, listener, emptySet()); } @@ -1255,11 +1287,12 @@ public class RestHighLevelClient implements Closeable { * * See Multi Search Template API * on elastic.co. + * @return cancellable that may be used to cancel the request */ - public final void msearchTemplateAsync(MultiSearchTemplateRequest multiSearchTemplateRequest, - RequestOptions options, - ActionListener listener) { - performRequestAsyncAndParseEntity(multiSearchTemplateRequest, RequestConverters::multiSearchTemplate, + public final Cancellable msearchTemplateAsync(MultiSearchTemplateRequest multiSearchTemplateRequest, + RequestOptions options, + ActionListener listener) { + return performRequestAsyncAndParseEntity(multiSearchTemplateRequest, RequestConverters::multiSearchTemplate, options, MultiSearchTemplateResponse::fromXContext, listener, emptySet()); } @@ -1270,9 +1303,12 @@ public class RestHighLevelClient implements Closeable { * @param rankEvalRequest the request * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public final void rankEvalAsync(RankEvalRequest rankEvalRequest, RequestOptions options, ActionListener listener) { - performRequestAsyncAndParseEntity(rankEvalRequest, RequestConverters::rankEval, options, RankEvalResponse::fromXContent, listener, + public final Cancellable rankEvalAsync(RankEvalRequest rankEvalRequest, RequestOptions options, + ActionListener listener) { + return performRequestAsyncAndParseEntity(rankEvalRequest, RequestConverters::rankEval, options, + RankEvalResponse::fromXContent, listener, emptySet()); } @@ -1310,10 +1346,11 @@ public class RestHighLevelClient implements Closeable { * @param request the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void getScriptAsync(GetStoredScriptRequest request, RequestOptions options, - ActionListener listener) { - performRequestAsyncAndParseEntity(request, RequestConverters::getScript, options, + public Cancellable getScriptAsync(GetStoredScriptRequest request, RequestOptions options, + ActionListener listener) { + return performRequestAsyncAndParseEntity(request, RequestConverters::getScript, options, GetStoredScriptResponse::fromXContent, listener, emptySet()); } @@ -1337,10 +1374,11 @@ public class RestHighLevelClient implements Closeable { * @param request the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void deleteScriptAsync(DeleteStoredScriptRequest request, RequestOptions options, - ActionListener listener) { - performRequestAsyncAndParseEntity(request, RequestConverters::deleteScript, options, + public Cancellable deleteScriptAsync(DeleteStoredScriptRequest request, RequestOptions options, + ActionListener listener) { + return performRequestAsyncAndParseEntity(request, RequestConverters::deleteScript, options, AcknowledgedResponse::fromXContent, listener, emptySet()); } @@ -1365,10 +1403,11 @@ public class RestHighLevelClient implements Closeable { * @param putStoredScriptRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void putScriptAsync(PutStoredScriptRequest putStoredScriptRequest, RequestOptions options, - ActionListener listener) { - performRequestAsyncAndParseEntity(putStoredScriptRequest, RequestConverters::putScript, options, + public Cancellable putScriptAsync(PutStoredScriptRequest putStoredScriptRequest, RequestOptions options, + ActionListener listener) { + return performRequestAsyncAndParseEntity(putStoredScriptRequest, RequestConverters::putScript, options, AcknowledgedResponse::fromXContent, listener, emptySet()); } @@ -1379,10 +1418,11 @@ public class RestHighLevelClient implements Closeable { * @param fieldCapabilitiesRequest the request * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public final void fieldCapsAsync(FieldCapabilitiesRequest fieldCapabilitiesRequest, RequestOptions options, - ActionListener listener) { - performRequestAsyncAndParseEntity(fieldCapabilitiesRequest, RequestConverters::fieldCaps, options, + public final Cancellable fieldCapsAsync(FieldCapabilitiesRequest fieldCapabilitiesRequest, RequestOptions options, + ActionListener listener) { + return performRequestAsyncAndParseEntity(fieldCapabilitiesRequest, RequestConverters::fieldCaps, options, FieldCapabilitiesResponse::fromXContent, listener, emptySet()); } @@ -1514,26 +1554,28 @@ public class RestHighLevelClient implements Closeable { /** * @deprecated If creating a new HLRC ReST API call, consider creating new actions instead of reusing server actions. The Validation * layer has been added to the ReST client, and requests should extend {@link Validatable} instead of {@link ActionRequest}. + * @return Cancellable instance that may be used to cancel the request */ @Deprecated - protected final void performRequestAsyncAndParseEntity(Req request, - CheckedFunction requestConverter, - RequestOptions options, - CheckedFunction entityParser, - ActionListener listener, Set ignores) { - performRequestAsync(request, requestConverter, options, + protected final Cancellable performRequestAsyncAndParseEntity(Req request, + CheckedFunction requestConverter, + RequestOptions options, + CheckedFunction entityParser, + ActionListener listener, Set ignores) { + return performRequestAsync(request, requestConverter, options, response -> parseEntity(response.getEntity(), entityParser), listener, ignores); } /** * Defines a helper method for asynchronously performing a request. + * @return Cancellable instance that may be used to cancel the request */ - protected final void performRequestAsyncAndParseEntity(Req request, - CheckedFunction requestConverter, - RequestOptions options, - CheckedFunction entityParser, - ActionListener listener, Set ignores) { - performRequestAsync(request, requestConverter, options, + protected final Cancellable performRequestAsyncAndParseEntity(Req request, + CheckedFunction requestConverter, + RequestOptions options, + CheckedFunction entityParser, + ActionListener listener, Set ignores) { + return performRequestAsync(request, requestConverter, options, response -> parseEntity(response.getEntity(), entityParser), listener, ignores); } @@ -1541,56 +1583,59 @@ public class RestHighLevelClient implements Closeable { /** * @deprecated If creating a new HLRC ReST API call, consider creating new actions instead of reusing server actions. The Validation * layer has been added to the ReST client, and requests should extend {@link Validatable} instead of {@link ActionRequest}. 
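Taken together, the hunks above change the public asynchronous methods of RestHighLevelClient from returning void to returning the Cancellable produced by the low-level RestClient, so a caller can abort an in-flight request. A minimal caller-side sketch of the new contract follows; the client, bulkRequest and listener names are illustrative assumptions and do not appear in this change, and the cancellation behaviour described in the comments is hedged rather than quoted from the patch.

// Illustrative usage only: assumes an already configured RestHighLevelClient ("client")
// and a populated BulkRequest ("bulkRequest"); neither name is part of this diff.
Cancellable cancellable = client.bulkAsync(bulkRequest, RequestOptions.DEFAULT,
    new ActionListener<BulkResponse>() {
        @Override
        public void onResponse(BulkResponse response) {
            // normal completion path
        }

        @Override
        public void onFailure(Exception e) {
            // a cancelled request is reported here as a failure,
            // typically a CancellationException from the low-level client
        }
    });

// Later, for example on shutdown or timeout, the caller may abort the in-flight HTTP request.
cancellable.cancel();

Because the deprecated overloads shown earlier (multiGetAsync, multiSearchAsync, searchScrollAsync) now simply delegate to their replacements and return the result, they hand back the same Cancellable.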
+ * @return Cancellable instance that may be used to cancel the request */ @Deprecated - protected final void performRequestAsync(Req request, - CheckedFunction requestConverter, - RequestOptions options, - CheckedFunction responseConverter, - ActionListener listener, Set ignores) { + protected final Cancellable performRequestAsync(Req request, + CheckedFunction requestConverter, + RequestOptions options, + CheckedFunction responseConverter, + ActionListener listener, Set ignores) { ActionRequestValidationException validationException = request.validate(); if (validationException != null && validationException.validationErrors().isEmpty() == false) { listener.onFailure(validationException); - return; + return Cancellable.NO_OP; } - internalPerformRequestAsync(request, requestConverter, options, responseConverter, listener, ignores); + return internalPerformRequestAsync(request, requestConverter, options, responseConverter, listener, ignores); } /** * Defines a helper method for asynchronously performing a request. + * @return Cancellable instance that may be used to cancel the request */ - protected final void performRequestAsync(Req request, - CheckedFunction requestConverter, - RequestOptions options, - CheckedFunction responseConverter, - ActionListener listener, Set ignores) { + protected final Cancellable performRequestAsync(Req request, + CheckedFunction requestConverter, + RequestOptions options, + CheckedFunction responseConverter, + ActionListener listener, Set ignores) { Optional validationException = request.validate(); if (validationException != null && validationException.isPresent()) { listener.onFailure(validationException.get()); - return; + return Cancellable.NO_OP; } - internalPerformRequestAsync(request, requestConverter, options, responseConverter, listener, ignores); + return internalPerformRequestAsync(request, requestConverter, options, responseConverter, listener, ignores); } /** * Provides common functionality for asynchronously performing a request. 
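Both performRequestAsync variants above share one pattern: if client-side validation fails, the error is delivered to the listener and Cancellable.NO_OP is returned, so callers always receive a non-null handle; otherwise the call is delegated and the Cancellable created by the low-level client is passed through unchanged. A condensed sketch of that flow is below; the generic bounds shown (Req extends Validatable, the CheckedFunction signatures, Set<Integer>) are assumptions about the full method signatures rather than a verbatim quote of the patch.

// Hedged reconstruction of the validation short-circuit added in this change;
// generic bounds are assumptions, the control flow mirrors the diff.
protected final <Req extends Validatable, Resp> Cancellable performRequestAsync(
        Req request,
        CheckedFunction<Req, Request, IOException> requestConverter,
        RequestOptions options,
        CheckedFunction<Response, Resp, IOException> responseConverter,
        ActionListener<Resp> listener, Set<Integer> ignores) {
    Optional<ValidationException> validationException = request.validate();
    if (validationException != null && validationException.isPresent()) {
        // report the problem through the listener and hand back a do-nothing handle
        listener.onFailure(validationException.get());
        return Cancellable.NO_OP;
    }
    // otherwise propagate the Cancellable produced by the low-level client
    return internalPerformRequestAsync(request, requestConverter, options, responseConverter, listener, ignores);
}

Returning Cancellable.NO_OP instead of null keeps the new return type safe to use unconditionally, which matters because internalPerformRequestAsync in the next hunk takes the same early-exit path when the request converter throws.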
+ * @return Cancellable instance that may be used to cancel the request */ - private void internalPerformRequestAsync(Req request, - CheckedFunction requestConverter, - RequestOptions options, - CheckedFunction responseConverter, - ActionListener listener, Set ignores) { + private Cancellable internalPerformRequestAsync(Req request, + CheckedFunction requestConverter, + RequestOptions options, + CheckedFunction responseConverter, + ActionListener listener, Set ignores) { Request req; try { req = requestConverter.apply(request); } catch (Exception e) { listener.onFailure(e); - return; + return Cancellable.NO_OP; } req.setOptions(options); ResponseListener responseListener = wrapResponseListener(responseConverter, listener, ignores); - client.performRequestAsync(req, responseListener); + return client.performRequestAsync(req, responseListener); } @@ -1634,28 +1679,29 @@ public class RestHighLevelClient implements Closeable { /** * Asynchronous request which returns empty {@link Optional}s in the case of 404s or parses entity into an Optional + * @return Cancellable instance that may be used to cancel the request */ - protected final void performRequestAsyncAndParseOptionalEntity(Req request, - CheckedFunction requestConverter, - RequestOptions options, - CheckedFunction entityParser, - ActionListener> listener) { + protected final Cancellable performRequestAsyncAndParseOptionalEntity(Req request, + CheckedFunction requestConverter, + RequestOptions options, + CheckedFunction entityParser, + ActionListener> listener) { Optional validationException = request.validate(); if (validationException != null && validationException.isPresent()) { listener.onFailure(validationException.get()); - return; + return Cancellable.NO_OP; } Request req; try { req = requestConverter.apply(request); } catch (Exception e) { listener.onFailure(e); - return; + return Cancellable.NO_OP; } req.setOptions(options); ResponseListener responseListener = wrapResponseListener404sOptional(response -> parseEntity(response.getEntity(), entityParser), listener); - client.performRequestAsync(req, responseListener); + return client.performRequestAsync(req, responseListener); } final ResponseListener wrapResponseListener404sOptional(CheckedFunction responseConverter, diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RollupClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RollupClient.java index cf0ceec70b5..55dabc2be0e 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RollupClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RollupClient.java @@ -30,8 +30,6 @@ import org.elasticsearch.client.rollup.GetRollupJobRequest; import org.elasticsearch.client.rollup.GetRollupJobResponse; import org.elasticsearch.client.rollup.GetRollupCapsRequest; import org.elasticsearch.client.rollup.GetRollupCapsResponse; -import org.elasticsearch.client.rollup.GetRollupJobRequest; -import org.elasticsearch.client.rollup.GetRollupJobResponse; import org.elasticsearch.client.rollup.PutRollupJobRequest; import org.elasticsearch.client.rollup.StartRollupJobRequest; import org.elasticsearch.client.rollup.StartRollupJobResponse; @@ -80,9 +78,11 @@ public class RollupClient { * @param request the request * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void putRollupJobAsync(PutRollupJobRequest request, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, + public Cancellable putRollupJobAsync(PutRollupJobRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, RollupRequestConverters::putJob, options, AcknowledgedResponse::fromXContent, @@ -113,10 +113,11 @@ public class RollupClient { * @param request the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void startRollupJobAsync(StartRollupJobRequest request, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, + public Cancellable startRollupJobAsync(StartRollupJobRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, RollupRequestConverters::startJob, options, StartRollupJobResponse::fromXContent, @@ -147,10 +148,11 @@ public class RollupClient { * @param request the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void stopRollupJobAsync(StopRollupJobRequest request, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, + public Cancellable stopRollupJobAsync(StopRollupJobRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, RollupRequestConverters::stopJob, options, StopRollupJobResponse::fromXContent, @@ -180,11 +182,12 @@ public class RollupClient { * @param request the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void deleteRollupJobAsync(DeleteRollupJobRequest request, - RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, + public Cancellable deleteRollupJobAsync(DeleteRollupJobRequest request, + RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, RollupRequestConverters::deleteJob, options, AcknowledgedResponse::fromXContent, @@ -215,11 +218,11 @@ public class RollupClient { * @param request the request * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - - - public void getRollupJobAsync(GetRollupJobRequest request, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, + public Cancellable getRollupJobAsync(GetRollupJobRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, RollupRequestConverters::getJob, options, GetRollupJobResponse::fromXContent, @@ -251,9 +254,10 @@ public class RollupClient { * @param request the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void searchAsync(SearchRequest request, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity( + public Cancellable searchAsync(SearchRequest request, RequestOptions options, ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity( request, RollupRequestConverters::search, options, @@ -286,10 +290,11 @@ public class RollupClient { * @param request the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void getRollupCapabilitiesAsync(GetRollupCapsRequest request, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, + public Cancellable getRollupCapabilitiesAsync(GetRollupCapsRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, RollupRequestConverters::getRollupCaps, options, GetRollupCapsResponse::fromXContent, @@ -322,10 +327,11 @@ public class RollupClient { * @param request the request * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void getRollupIndexCapabilitiesAsync(GetRollupIndexCapsRequest request, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, + public Cancellable getRollupIndexCapabilitiesAsync(GetRollupIndexCapsRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, RollupRequestConverters::getRollupIndexCaps, options, GetRollupIndexCapsResponse::fromXContent, diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityClient.java index a807b798f57..ef2e9642c98 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityClient.java @@ -113,9 +113,10 @@ public final class SecurityClient { * @param request the request with the user's name * @param options the request options (e.g., headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void getUsersAsync(GetUsersRequest request, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, SecurityRequestConverters::getUsers, options, + public Cancellable getUsersAsync(GetUsersRequest request, RequestOptions options, ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, SecurityRequestConverters::getUsers, options, GetUsersResponse::fromXContent, listener, emptySet()); } @@ -142,9 +143,10 @@ public final class SecurityClient { * @param request the request with the user's information * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void putUserAsync(PutUserRequest request, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, SecurityRequestConverters::putUser, options, + public Cancellable putUserAsync(PutUserRequest request, RequestOptions options, ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, SecurityRequestConverters::putUser, options, PutUserResponse::fromXContent, listener, emptySet()); } @@ -169,9 +171,10 @@ public final class SecurityClient { * @param request the request with the user to delete * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void deleteUserAsync(DeleteUserRequest request, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, SecurityRequestConverters::deleteUser, options, + public Cancellable deleteUserAsync(DeleteUserRequest request, RequestOptions options, ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, SecurityRequestConverters::deleteUser, options, DeleteUserResponse::fromXContent, listener, singleton(404)); } @@ -196,10 +199,11 @@ public final class SecurityClient { * @param request the request with the role mapping information * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void putRoleMappingAsync(final PutRoleMappingRequest request, final RequestOptions options, - final ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, SecurityRequestConverters::putRoleMapping, options, + public Cancellable putRoleMappingAsync(final PutRoleMappingRequest request, final RequestOptions options, + final ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, SecurityRequestConverters::putRoleMapping, options, PutRoleMappingResponse::fromXContent, listener, emptySet()); } @@ -216,7 +220,8 @@ public final class SecurityClient { * @throws IOException in case there is a problem sending the request or * parsing back the response */ - public GetRoleMappingsResponse getRoleMappings(final GetRoleMappingsRequest request, final RequestOptions options) throws IOException { + public GetRoleMappingsResponse getRoleMappings(final GetRoleMappingsRequest request, + final RequestOptions options) throws IOException { return restHighLevelClient.performRequestAndParseEntity(request, SecurityRequestConverters::getRoleMappings, options, GetRoleMappingsResponse::fromXContent, emptySet()); } @@ -230,10 +235,11 @@ public final class SecurityClient { * If no role mapping name is provided then retrieves all role mappings. * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void getRoleMappingsAsync(final GetRoleMappingsRequest request, final RequestOptions options, - final ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, SecurityRequestConverters::getRoleMappings, + public Cancellable getRoleMappingsAsync(final GetRoleMappingsRequest request, final RequestOptions options, + final ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, SecurityRequestConverters::getRoleMappings, options, GetRoleMappingsResponse::fromXContent, listener, emptySet()); } @@ -276,10 +282,11 @@ public final class SecurityClient { * @param request the request with the user to enable * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void enableUserAsync(EnableUserRequest request, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsync(request, SecurityRequestConverters::enableUser, options, + public Cancellable enableUserAsync(EnableUserRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsync(request, SecurityRequestConverters::enableUser, options, RestHighLevelClient::convertExistsResponse, listener, emptySet()); } @@ -292,11 +299,12 @@ public final class SecurityClient { * @param request the request with the user to enable * @param listener the listener to be notified upon request completion * @deprecated use {@link #enableUserAsync(EnableUserRequest, RequestOptions, ActionListener)} instead + * @return cancellable that may be used to cancel the request */ @Deprecated - public void enableUserAsync(RequestOptions options, EnableUserRequest request, - ActionListener listener) { - enableUserAsync(request, options, listener); + public Cancellable enableUserAsync(RequestOptions options, EnableUserRequest request, + ActionListener listener) { + return enableUserAsync(request, options, listener); } /** @@ -338,10 +346,11 @@ public final class SecurityClient { * @param request the request with the user to disable * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void disableUserAsync(DisableUserRequest request, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsync(request, SecurityRequestConverters::disableUser, options, + public Cancellable disableUserAsync(DisableUserRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsync(request, SecurityRequestConverters::disableUser, options, RestHighLevelClient::convertExistsResponse, listener, emptySet()); } @@ -354,11 +363,12 @@ public final class SecurityClient { * @param request the request with the user to disable * @param listener the listener to be notified upon request completion * @deprecated use {@link #disableUserAsync(DisableUserRequest, RequestOptions, ActionListener)} instead + * @return cancellable that may be used to cancel the request */ @Deprecated - public void disableUserAsync(RequestOptions options, DisableUserRequest request, - ActionListener listener) { - disableUserAsync(request, options, listener); + public Cancellable disableUserAsync(RequestOptions options, DisableUserRequest request, + ActionListener listener) { + return disableUserAsync(request, options, listener); } /** @@ -381,9 +391,10 @@ public final class SecurityClient { * * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void authenticateAsync(RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(AuthenticateRequest.INSTANCE, AuthenticateRequest::getRequest, options, + public Cancellable authenticateAsync(RequestOptions options, ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(AuthenticateRequest.INSTANCE, AuthenticateRequest::getRequest, options, AuthenticateResponse::fromXContent, listener, emptySet()); } @@ -405,13 +416,14 @@ public final class SecurityClient { * Asynchronously determine whether the current user has a specified list of privileges * See * the docs for more. - * * @param request the request with the privileges to check * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void hasPrivilegesAsync(HasPrivilegesRequest request, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, SecurityRequestConverters::hasPrivileges, options, + public Cancellable hasPrivilegesAsync(HasPrivilegesRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, SecurityRequestConverters::hasPrivileges, options, HasPrivilegesResponse::fromXContent, listener, emptySet()); } @@ -428,9 +440,11 @@ public final class SecurityClient { * Asynchronously retrieve the set of effective privileges held by the current user. * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void getUserPrivilegesAsync(RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(GetUserPrivilegesRequest.INSTANCE, GetUserPrivilegesRequest::getRequest, + public Cancellable getUserPrivilegesAsync(RequestOptions options, ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity( + GetUserPrivilegesRequest.INSTANCE, GetUserPrivilegesRequest::getRequest, options, GetUserPrivilegesResponse::fromXContent, listener, emptySet()); } @@ -457,10 +471,11 @@ public final class SecurityClient { * @param request the request with the realm names and usernames to clear the cache for * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void clearRealmCacheAsync(ClearRealmCacheRequest request, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, SecurityRequestConverters::clearRealmCache, options, + public Cancellable clearRealmCacheAsync(ClearRealmCacheRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, SecurityRequestConverters::clearRealmCache, options, ClearRealmCacheResponse::fromXContent, listener, emptySet()); } @@ -487,10 +502,11 @@ public final class SecurityClient { * @param request the request with the roles for which the cache should be cleared. * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void clearRolesCacheAsync(ClearRolesCacheRequest request, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, SecurityRequestConverters::clearRolesCache, options, + public Cancellable clearRolesCacheAsync(ClearRolesCacheRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, SecurityRequestConverters::clearRolesCache, options, ClearRolesCacheResponse::fromXContent, listener, emptySet()); } @@ -515,9 +531,11 @@ public final class SecurityClient { * * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void getSslCertificatesAsync(RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(GetSslCertificatesRequest.INSTANCE, GetSslCertificatesRequest::getRequest, + public Cancellable getSslCertificatesAsync(RequestOptions options, ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity( + GetSslCertificatesRequest.INSTANCE, GetSslCertificatesRequest::getRequest, options, GetSslCertificatesResponse::fromXContent, listener, emptySet()); } @@ -560,10 +578,11 @@ public final class SecurityClient { * @param request the request with the user's new password * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void changePasswordAsync(ChangePasswordRequest request, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsync(request, SecurityRequestConverters::changePassword, options, + public Cancellable changePasswordAsync(ChangePasswordRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsync(request, SecurityRequestConverters::changePassword, options, RestHighLevelClient::convertExistsResponse, listener, emptySet()); } @@ -576,11 +595,12 @@ public final class SecurityClient { * @param request the request with the user's new password * @param listener the listener to be notified upon request completion * @deprecated use {@link #changePasswordAsync(ChangePasswordRequest, RequestOptions, ActionListener)} instead + * @return cancellable that may be used to cancel the request */ @Deprecated - public void changePasswordAsync(RequestOptions options, ChangePasswordRequest request, - ActionListener listener) { - changePasswordAsync(request, options, listener); + public Cancellable changePasswordAsync(RequestOptions options, ChangePasswordRequest request, + ActionListener listener) { + return changePasswordAsync(request, options, listener); } /** @@ -605,9 +625,10 @@ public final class SecurityClient { * @param request the request with the roles to get * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void getRolesAsync(GetRolesRequest request, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, SecurityRequestConverters::getRoles, options, + public Cancellable getRolesAsync(GetRolesRequest request, RequestOptions options, ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, SecurityRequestConverters::getRoles, options, GetRolesResponse::fromXContent, listener, emptySet()); } @@ -634,9 +655,10 @@ public final class SecurityClient { * @param request the request containing the role to create or update * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void putRoleAsync(PutRoleRequest request, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, SecurityRequestConverters::putRole, options, + public Cancellable putRoleAsync(PutRoleRequest request, RequestOptions options, ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, SecurityRequestConverters::putRole, options, PutRoleResponse::fromXContent, listener, emptySet()); } @@ -662,10 +684,12 @@ public final class SecurityClient { * @param request the request with the role mapping name to be deleted. * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void deleteRoleMappingAsync(DeleteRoleMappingRequest request, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, SecurityRequestConverters::deleteRoleMapping, options, + public Cancellable deleteRoleMappingAsync(DeleteRoleMappingRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, + SecurityRequestConverters::deleteRoleMapping, options, DeleteRoleMappingResponse::fromXContent, listener, emptySet()); } @@ -690,9 +714,11 @@ public final class SecurityClient { * @param request the request with the role to delete * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void deleteRoleAsync(DeleteRoleRequest request, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, SecurityRequestConverters::deleteRole, options, + public Cancellable deleteRoleAsync(DeleteRoleRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, SecurityRequestConverters::deleteRole, options, DeleteRoleResponse::fromXContent, listener, singleton(404)); } @@ -719,9 +745,11 @@ public final class SecurityClient { * @param request the request for the token * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void createTokenAsync(CreateTokenRequest request, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, SecurityRequestConverters::createToken, options, + public Cancellable createTokenAsync(CreateTokenRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, SecurityRequestConverters::createToken, options, CreateTokenResponse::fromXContent, listener, emptySet()); } @@ -744,14 +772,14 @@ public final class SecurityClient { * Asynchronously invalidates an OAuth2 token. * See * the docs for more. - * * @param request the request to invalidate the token * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void invalidateTokenAsync(InvalidateTokenRequest request, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, SecurityRequestConverters::invalidateToken, options, + public Cancellable invalidateTokenAsync(InvalidateTokenRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, SecurityRequestConverters::invalidateToken, options, InvalidateTokenResponse::fromXContent, listener, emptySet()); } @@ -777,10 +805,13 @@ public final class SecurityClient { * * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void getBuiltinPrivilegesAsync(final RequestOptions options, final ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(GetBuiltinPrivilegesRequest.INSTANCE, - GetBuiltinPrivilegesRequest::getRequest, options, GetBuiltinPrivilegesResponse::fromXContent, listener, emptySet()); + public Cancellable getBuiltinPrivilegesAsync(final RequestOptions options, + final ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(GetBuiltinPrivilegesRequest.INSTANCE, + GetBuiltinPrivilegesRequest::getRequest, options, GetBuiltinPrivilegesResponse::fromXContent, + listener, emptySet()); } /** @@ -806,16 +837,16 @@ public final class SecurityClient { * Asynchronously get application privilege(s). * See * the docs for more. - * - * @param request {@link GetPrivilegesRequest} with the application name and the privilege name. + * @param request {@link GetPrivilegesRequest} with the application name and the privilege name. * If no application name is provided, information about all privileges for all applications is retrieved. * If no privilege name is provided, information about all privileges of the specified application is retrieved. * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void getPrivilegesAsync(final GetPrivilegesRequest request, final RequestOptions options, - final ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, SecurityRequestConverters::getPrivileges, + public Cancellable getPrivilegesAsync(final GetPrivilegesRequest request, final RequestOptions options, + final ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, SecurityRequestConverters::getPrivileges, options, GetPrivilegesResponse::fromXContent, listener, emptySet()); } @@ -844,10 +875,11 @@ public final class SecurityClient { * @param options the request options (e.g. 
headers), use * {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void putPrivilegesAsync(final PutPrivilegesRequest request, final RequestOptions options, - final ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, SecurityRequestConverters::putPrivileges, options, + public Cancellable putPrivilegesAsync(final PutPrivilegesRequest request, final RequestOptions options, + final ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, SecurityRequestConverters::putPrivileges, options, PutPrivilegesResponse::fromXContent, listener, emptySet()); } @@ -874,10 +906,11 @@ public final class SecurityClient { * @param request the request with the application privilege to delete * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void deletePrivilegesAsync(DeletePrivilegesRequest request, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, SecurityRequestConverters::deletePrivileges, options, + public Cancellable deletePrivilegesAsync(DeletePrivilegesRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, SecurityRequestConverters::deletePrivileges, options, DeletePrivilegesResponse::fromXContent, listener, singleton(404)); } @@ -904,10 +937,11 @@ public final class SecurityClient { * @param request the request to create an API key * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void createApiKeyAsync(final CreateApiKeyRequest request, final RequestOptions options, - final ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, SecurityRequestConverters::createApiKey, options, + public Cancellable createApiKeyAsync(final CreateApiKeyRequest request, final RequestOptions options, + final ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, SecurityRequestConverters::createApiKey, options, CreateApiKeyResponse::fromXContent, listener, emptySet()); } @@ -934,10 +968,11 @@ public final class SecurityClient { * @param request the request to retrieve API key(s) * @param options the request options (e.g.
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void getApiKeyAsync(final GetApiKeyRequest request, final RequestOptions options, - final ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, SecurityRequestConverters::getApiKey, options, + public Cancellable getApiKeyAsync(final GetApiKeyRequest request, final RequestOptions options, + final ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, SecurityRequestConverters::getApiKey, options, GetApiKeyResponse::fromXContent, listener, emptySet()); } @@ -965,10 +1000,11 @@ public final class SecurityClient { * @param request the request to invalidate API key(s) * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void invalidateApiKeyAsync(final InvalidateApiKeyRequest request, final RequestOptions options, - final ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, SecurityRequestConverters::invalidateApiKey, options, + public Cancellable invalidateApiKeyAsync(final InvalidateApiKeyRequest request, final RequestOptions options, + final ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, SecurityRequestConverters::invalidateApiKey, options, InvalidateApiKeyResponse::fromXContent, listener, emptySet()); } @@ -977,7 +1013,7 @@ public final class SecurityClient { * authenticated TLS session, and it is validated by the PKI realms with {@code delegation.enabled} toggled to {@code true}.
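Usage sketch (illustrative, not part of this change): because these async methods now return Cancellable rather than void, a caller can abort an in-flight request. The snippet assumes a RestHighLevelClient named client and a GetApiKeyRequest named request built elsewhere; Cancellable and ActionListener come from org.elasticsearch.client and org.elasticsearch.action.

    Cancellable cancellable = client.security().getApiKeyAsync(request, RequestOptions.DEFAULT,
        new ActionListener<GetApiKeyResponse>() {
            @Override
            public void onResponse(GetApiKeyResponse response) {
                // consume the returned API key information
            }

            @Override
            public void onFailure(Exception e) {
                // failures, including cancellation of the request, surface here
            }
        });
    // later, e.g. while shutting down, give up on the response
    cancellable.cancel();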
* See the * docs for more details. - * + * * @param request the request containing the certificate chain * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @return the response from the delegate-pki-authentication API key call @@ -995,14 +1031,14 @@ public final class SecurityClient { * {@code true}.
* See the * docs for more details. - * + * * @param request the request containing the certificate chain * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion */ - public void delegatePkiAuthenticationAsync(DelegatePkiAuthenticationRequest request, RequestOptions options, + public Cancellable delegatePkiAuthenticationAsync(DelegatePkiAuthenticationRequest request, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, SecurityRequestConverters::delegatePkiAuthentication, options, + return restHighLevelClient.performRequestAsyncAndParseEntity(request, SecurityRequestConverters::delegatePkiAuthentication, options, DelegatePkiAuthenticationResponse::fromXContent, listener, emptySet()); } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/SnapshotClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/SnapshotClient.java index d3b2ea466f4..134dc921c45 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/SnapshotClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/SnapshotClient.java @@ -79,10 +79,12 @@ public final class SnapshotClient { * @param getRepositoriesRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void getRepositoryAsync(GetRepositoriesRequest getRepositoriesRequest, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(getRepositoriesRequest, SnapshotRequestConverters::getRepositories, options, + public Cancellable getRepositoryAsync(GetRepositoriesRequest getRepositoriesRequest, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(getRepositoriesRequest, + SnapshotRequestConverters::getRepositories, options, GetRepositoriesResponse::fromXContent, listener, emptySet()); } @@ -107,10 +109,12 @@ public final class SnapshotClient { * @param putRepositoryRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void createRepositoryAsync(PutRepositoryRequest putRepositoryRequest, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(putRepositoryRequest, SnapshotRequestConverters::createRepository, options, + public Cancellable createRepositoryAsync(PutRepositoryRequest putRepositoryRequest, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(putRepositoryRequest, + SnapshotRequestConverters::createRepository, options, AcknowledgedResponse::fromXContent, listener, emptySet()); } @@ -136,10 +140,12 @@ public final class SnapshotClient { * @param deleteRepositoryRequest the request * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void deleteRepositoryAsync(DeleteRepositoryRequest deleteRepositoryRequest, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(deleteRepositoryRequest, SnapshotRequestConverters::deleteRepository, options, + public Cancellable deleteRepositoryAsync(DeleteRepositoryRequest deleteRepositoryRequest, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(deleteRepositoryRequest, + SnapshotRequestConverters::deleteRepository, options, AcknowledgedResponse::fromXContent, listener, emptySet()); } @@ -165,10 +171,12 @@ public final class SnapshotClient { * @param verifyRepositoryRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void verifyRepositoryAsync(VerifyRepositoryRequest verifyRepositoryRequest, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(verifyRepositoryRequest, SnapshotRequestConverters::verifyRepository, options, + public Cancellable verifyRepositoryAsync(VerifyRepositoryRequest verifyRepositoryRequest, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(verifyRepositoryRequest, + SnapshotRequestConverters::verifyRepository, options, VerifyRepositoryResponse::fromXContent, listener, emptySet()); } @@ -194,10 +202,11 @@ public final class SnapshotClient { * @param cleanupRepositoryRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void cleanupRepositoryAsync(CleanupRepositoryRequest cleanupRepositoryRequest, RequestOptions options, + public Cancellable cleanupRepositoryAsync(CleanupRepositoryRequest cleanupRepositoryRequest, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(cleanupRepositoryRequest, SnapshotRequestConverters::cleanupRepository, + return restHighLevelClient.performRequestAsyncAndParseEntity(cleanupRepositoryRequest, SnapshotRequestConverters::cleanupRepository, options, CleanupRepositoryResponse::fromXContent, listener, emptySet()); } @@ -218,10 +227,12 @@ public final class SnapshotClient { *

* See Snapshot and Restore * API on elastic.co + * @return cancellable that may be used to cancel the request */ - public void createAsync(CreateSnapshotRequest createSnapshotRequest, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(createSnapshotRequest, SnapshotRequestConverters::createSnapshot, options, + public Cancellable createAsync(CreateSnapshotRequest createSnapshotRequest, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(createSnapshotRequest, + SnapshotRequestConverters::createSnapshot, options, CreateSnapshotResponse::fromXContent, listener, emptySet()); } @@ -244,13 +255,15 @@ public final class SnapshotClient { * Asynchronously get snapshots. * See Snapshot and Restore * API on elastic.co - * - * @param getSnapshotsRequest the request + * @param getSnapshotsRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void getAsync(GetSnapshotsRequest getSnapshotsRequest, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(getSnapshotsRequest, SnapshotRequestConverters::getSnapshots, options, + public Cancellable getAsync(GetSnapshotsRequest getSnapshotsRequest, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(getSnapshotsRequest, + SnapshotRequestConverters::getSnapshots, options, GetSnapshotsResponse::fromXContent, listener, emptySet()); } @@ -276,10 +289,12 @@ public final class SnapshotClient { * @param snapshotsStatusRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void statusAsync(SnapshotsStatusRequest snapshotsStatusRequest, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(snapshotsStatusRequest, SnapshotRequestConverters::snapshotsStatus, options, + public Cancellable statusAsync(SnapshotsStatusRequest snapshotsStatusRequest, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(snapshotsStatusRequest, + SnapshotRequestConverters::snapshotsStatus, options, SnapshotsStatusResponse::fromXContent, listener, emptySet()); } @@ -306,10 +321,12 @@ public final class SnapshotClient { * @param restoreSnapshotRequest the request * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void restoreAsync(RestoreSnapshotRequest restoreSnapshotRequest, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(restoreSnapshotRequest, SnapshotRequestConverters::restoreSnapshot, options, + public Cancellable restoreAsync(RestoreSnapshotRequest restoreSnapshotRequest, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(restoreSnapshotRequest, + SnapshotRequestConverters::restoreSnapshot, options, RestoreSnapshotResponse::fromXContent, listener, emptySet()); } @@ -324,7 +341,8 @@ public final class SnapshotClient { * @throws IOException in case there is a problem sending the request or parsing back the response */ public AcknowledgedResponse delete(DeleteSnapshotRequest deleteSnapshotRequest, RequestOptions options) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(deleteSnapshotRequest, SnapshotRequestConverters::deleteSnapshot, options, + return restHighLevelClient.performRequestAndParseEntity(deleteSnapshotRequest, + SnapshotRequestConverters::deleteSnapshot, options, AcknowledgedResponse::fromXContent, emptySet()); } @@ -336,10 +354,12 @@ public final class SnapshotClient { * @param deleteSnapshotRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void deleteAsync(DeleteSnapshotRequest deleteSnapshotRequest, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(deleteSnapshotRequest, SnapshotRequestConverters::deleteSnapshot, options, + public Cancellable deleteAsync(DeleteSnapshotRequest deleteSnapshotRequest, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(deleteSnapshotRequest, + SnapshotRequestConverters::deleteSnapshot, options, AcknowledgedResponse::fromXContent, listener, emptySet()); } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/TasksClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/TasksClient.java index 4bf7565222a..04ccd323933 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/TasksClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/TasksClient.java @@ -65,12 +65,13 @@ public final class TasksClient { * @param request the request * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void listAsync(ListTasksRequest request, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, TasksRequestConverters::listTasks, options, + public Cancellable listAsync(ListTasksRequest request, RequestOptions options, ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, TasksRequestConverters::listTasks, options, ListTasksResponse::fromXContent, listener, emptySet()); } - + /** * Get a task using the Task Management API. * See @@ -82,9 +83,9 @@ public final class TasksClient { */ public Optional get(GetTaskRequest request, RequestOptions options) throws IOException { return restHighLevelClient.performRequestAndParseOptionalEntity(request, TasksRequestConverters::getTask, options, - GetTaskResponse::fromXContent); - } - + GetTaskResponse::fromXContent); + } + /** * Get a task using the Task Management API. * See @@ -92,12 +93,14 @@ public final class TasksClient { * @param request the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener an actionlistener that takes an optional response (404s are returned as an empty Optional) + * @return cancellable that may be used to cancel the request */ - public void getAsync(GetTaskRequest request, RequestOptions options, ActionListener> listener) { - - restHighLevelClient.performRequestAsyncAndParseOptionalEntity(request, TasksRequestConverters::getTask, options, + public Cancellable getAsync(GetTaskRequest request, RequestOptions options, + ActionListener> listener) { + + return restHighLevelClient.performRequestAsyncAndParseOptionalEntity(request, TasksRequestConverters::getTask, options, GetTaskResponse::fromXContent, listener); - } + } /** * Cancel one or more cluster tasks using the Task Management API. @@ -128,9 +131,11 @@ public final class TasksClient { * @param cancelTasksRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void cancelAsync(CancelTasksRequest cancelTasksRequest, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity( + public Cancellable cancelAsync(CancelTasksRequest cancelTasksRequest, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity( cancelTasksRequest, TasksRequestConverters::cancelTasks, options, diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/WatcherClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/WatcherClient.java index 9d75132a903..9a1ad6815aa 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/WatcherClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/WatcherClient.java @@ -71,10 +71,11 @@ public final class WatcherClient { * See * the docs for more. * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return cancellable that may be used to cancel the request */ - public void startWatchServiceAsync(StartWatchServiceRequest request, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity( + public Cancellable startWatchServiceAsync(StartWatchServiceRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity( request, WatcherRequestConverters::startWatchService, options, AcknowledgedResponse::fromXContent, listener, emptySet()); } @@ -98,10 +99,11 @@ public final class WatcherClient { * the docs for more. * @param request the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return cancellable that may be used to cancel the request */ - public void stopWatchServiceAsync(StopWatchServiceRequest request, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity( + public Cancellable stopWatchServiceAsync(StopWatchServiceRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity( request, WatcherRequestConverters::stopWatchService, options, AcknowledgedResponse::fromXContent, listener, emptySet()); } @@ -126,10 +128,11 @@ public final class WatcherClient { * @param request the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void putWatchAsync(PutWatchRequest request, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, WatcherRequestConverters::putWatch, options, + public Cancellable putWatchAsync(PutWatchRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, WatcherRequestConverters::putWatch, options, PutWatchResponse::fromXContent, listener, emptySet()); } @@ -154,10 +157,11 @@ public final class WatcherClient { * @param request the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void getWatchAsync(GetWatchRequest request, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, WatcherRequestConverters::getWatch, options, + public Cancellable getWatchAsync(GetWatchRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, WatcherRequestConverters::getWatch, options, GetWatchResponse::fromXContent, listener, emptySet()); } @@ -183,10 +187,11 @@ public final class WatcherClient { * @param request the request * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void deactivateWatchAsync(DeactivateWatchRequest request, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, WatcherRequestConverters::deactivateWatch, options, + public Cancellable deactivateWatchAsync(DeactivateWatchRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, WatcherRequestConverters::deactivateWatch, options, DeactivateWatchResponse::fromXContent, listener, emptySet()); } @@ -211,9 +216,10 @@ public final class WatcherClient { * @param request the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void deleteWatchAsync(DeleteWatchRequest request, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, WatcherRequestConverters::deleteWatch, options, + public Cancellable deleteWatchAsync(DeleteWatchRequest request, RequestOptions options, ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, WatcherRequestConverters::deleteWatch, options, DeleteWatchResponse::fromXContent, listener, singleton(404)); } @@ -238,9 +244,10 @@ public final class WatcherClient { * @param request the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon completion of the request + * @return cancellable that may be used to cancel the request */ - public void ackWatchAsync(AckWatchRequest request, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, WatcherRequestConverters::ackWatch, options, + public Cancellable ackWatchAsync(AckWatchRequest request, RequestOptions options, ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, WatcherRequestConverters::ackWatch, options, AckWatchResponse::fromXContent, listener, emptySet()); } @@ -265,9 +272,11 @@ public final class WatcherClient { * @param request the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void activateWatchAsync(ActivateWatchRequest request, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, WatcherRequestConverters::activateWatch, options, + public Cancellable activateWatchAsync(ActivateWatchRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, WatcherRequestConverters::activateWatch, options, ActivateWatchResponse::fromXContent, listener, singleton(404)); } @@ -292,9 +301,11 @@ public final class WatcherClient { * @param request the request * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void executeWatchAsync(ExecuteWatchRequest request, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, WatcherRequestConverters::executeWatch, options, + public Cancellable executeWatchAsync(ExecuteWatchRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, WatcherRequestConverters::executeWatch, options, ExecuteWatchResponse::fromXContent, listener, emptySet()); } @@ -319,9 +330,11 @@ public final class WatcherClient { * @param request the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void watcherStatsAsync(WatcherStatsRequest request, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, WatcherRequestConverters::watcherStats, options, + public Cancellable watcherStatsAsync(WatcherStatsRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, WatcherRequestConverters::watcherStats, options, WatcherStatsResponse::fromXContent, listener, emptySet()); } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/XPackClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/XPackClient.java index 69cdd329e39..cc020de92f6 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/XPackClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/XPackClient.java @@ -67,10 +67,11 @@ public final class XPackClient { * @param request the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void infoAsync(XPackInfoRequest request, RequestOptions options, - ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, XPackRequestConverters::info, options, + public Cancellable infoAsync(XPackInfoRequest request, RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, XPackRequestConverters::info, options, XPackInfoResponse::fromXContent, listener, emptySet()); } @@ -89,9 +90,10 @@ public final class XPackClient { * Asynchronously fetch usage information about X-Pack features from the cluster. * @param options the request options (e.g.
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request */ - public void usageAsync(XPackUsageRequest request, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, XPackRequestConverters::usage, options, + public Cancellable usageAsync(XPackUsageRequest request, RequestOptions options, ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, XPackRequestConverters::usage, options, XPackUsageResponse::fromXContent, listener, emptySet()); } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/core/CountRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/core/CountRequest.java index 4b40fde53e9..95ab3db7e71 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/core/CountRequest.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/core/CountRequest.java @@ -24,8 +24,13 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.internal.SearchContext; +import java.io.IOException; import java.util.Arrays; import java.util.Objects; @@ -34,32 +39,43 @@ import static org.elasticsearch.action.search.SearchRequest.DEFAULT_INDICES_OPTI /** * Encapsulates a request to _count API against one, several or all indices. */ -public final class CountRequest extends ActionRequest implements IndicesRequest.Replaceable { +public final class CountRequest extends ActionRequest implements IndicesRequest.Replaceable, ToXContentObject { private String[] indices = Strings.EMPTY_ARRAY; private String[] types = Strings.EMPTY_ARRAY; private String routing; private String preference; - private SearchSourceBuilder searchSourceBuilder; + private QueryBuilder query; private IndicesOptions indicesOptions = DEFAULT_INDICES_OPTIONS; + private int terminateAfter = SearchContext.DEFAULT_TERMINATE_AFTER; + private Float minScore; - public CountRequest() { - this.searchSourceBuilder = new SearchSourceBuilder(); - } + public CountRequest() {} /** * Constructs a new count request against the indices. No indices provided here means that count will execute on all indices. */ public CountRequest(String... indices) { - this(indices, new SearchSourceBuilder()); + indices(indices); } /** * Constructs a new search request against the provided indices with the given search source. + * + * @deprecated The count api only supports a query. Use {@link #CountRequest(String[], QueryBuilder)} instead. */ + @Deprecated public CountRequest(String[] indices, SearchSourceBuilder searchSourceBuilder) { indices(indices); - this.searchSourceBuilder = searchSourceBuilder; + this.query = Objects.requireNonNull(searchSourceBuilder, "source must not be null").query(); + } + + /** + * Constructs a new search request against the provided indices with the given query. 
+ */ + public CountRequest(String[] indices, QueryBuilder query) { + indices(indices); + this.query = Objects.requireNonNull(query, "query must not be null"); } @Override @@ -81,9 +97,20 @@ public final class CountRequest extends ActionRequest implements IndicesRequest. /** * The source of the count request. + * + * @deprecated The count api only supports a query. Use {@link #query(QueryBuilder)} instead. */ + @Deprecated public CountRequest source(SearchSourceBuilder searchSourceBuilder) { - this.searchSourceBuilder = Objects.requireNonNull(searchSourceBuilder, "source must not be null"); + this.query = Objects.requireNonNull(searchSourceBuilder, "source must not be null").query(); + return this; + } + + /** + * Sets the query to execute for this count request. + */ + public CountRequest query(QueryBuilder query) { + this.query = Objects.requireNonNull(query, "query must not be null"); return this; } @@ -156,20 +183,23 @@ public final class CountRequest extends ActionRequest implements IndicesRequest. } public Float minScore() { - return this.searchSourceBuilder.minScore(); + return minScore; } public CountRequest minScore(Float minScore) { - this.searchSourceBuilder.minScore(minScore); + this.minScore = minScore; return this; } public int terminateAfter() { - return this.searchSourceBuilder.terminateAfter(); + return this.terminateAfter; } public CountRequest terminateAfter(int terminateAfter) { - this.searchSourceBuilder.terminateAfter(terminateAfter); + if (terminateAfter < 0) { + throw new IllegalArgumentException("terminateAfter must be >= 0"); + } + this.terminateAfter = terminateAfter; return this; } @@ -182,8 +212,31 @@ public final class CountRequest extends ActionRequest implements IndicesRequest. return Arrays.copyOf(this.types, this.types.length); } + /** + * @return the source builder + * @deprecated The count api only supports a query. Use {@link #query()} instead. + */ + @Deprecated public SearchSourceBuilder source() { - return this.searchSourceBuilder; + return new SearchSourceBuilder().query(query); + } + + /** + * @return The provided query to execute with the count request or + * null if no query was provided. + */ + public QueryBuilder query() { + return query; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (query != null) { + builder.field("query", query); + } + builder.endObject(); + return builder; } @Override @@ -199,12 +252,15 @@ public final class CountRequest extends ActionRequest implements IndicesRequest.
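Usage sketch for the reworked CountRequest above (illustrative only; the index name, field and values are made up, and QueryBuilders and SearchSourceBuilder come from org.elasticsearch.index.query and org.elasticsearch.search.builder):

    CountRequest countRequest = new CountRequest(new String[]{"posts"},
            QueryBuilders.termQuery("user", "kimchy"))
        .terminateAfter(1000)
        .minScore(1.5f);

    // the deprecated source(...) path still compiles, but only the query is retained
    CountRequest legacy = new CountRequest("posts")
        .source(new SearchSourceBuilder().query(QueryBuilders.termQuery("user", "kimchy")));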
Arrays.equals(indices, that.indices) && Arrays.equals(types, that.types) && Objects.equals(routing, that.routing) && - Objects.equals(preference, that.preference); + Objects.equals(preference, that.preference) && + Objects.equals(terminateAfter, that.terminateAfter) && + Objects.equals(minScore, that.minScore) && + Objects.equals(query, that.query); } @Override public int hashCode() { - int result = Objects.hash(indicesOptions, routing, preference); + int result = Objects.hash(indicesOptions, routing, preference, terminateAfter, minScore, query); result = 31 * result + Arrays.hashCode(indices); result = 31 * result + Arrays.hashCode(types); return result; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetDataFrameAnalyticsStatsResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetDataFrameAnalyticsStatsResponse.java index 5391a576e98..bfe7d21dfba 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetDataFrameAnalyticsStatsResponse.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetDataFrameAnalyticsStatsResponse.java @@ -21,8 +21,8 @@ package org.elasticsearch.client.ml; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.TaskOperationFailure; -import org.elasticsearch.client.dataframe.AcknowledgedTasksResponse; import org.elasticsearch.client.ml.dataframe.DataFrameAnalyticsStats; +import org.elasticsearch.client.transform.AcknowledgedTasksResponse; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.ConstructingObjectParser; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsConfig.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsConfig.java index cdba0e5d5c7..1c333c0bad0 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsConfig.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/dataframe/DataFrameAnalyticsConfig.java @@ -20,7 +20,7 @@ package org.elasticsearch.client.ml.dataframe; import org.elasticsearch.Version; -import org.elasticsearch.client.dataframe.transforms.util.TimeUtil; +import org.elasticsearch.client.transform.transforms.util.TimeUtil; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/slm/GetSnapshotLifecycleStatsRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/slm/GetSnapshotLifecycleStatsRequest.java new file mode 100644 index 00000000000..285a179e3e6 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/slm/GetSnapshotLifecycleStatsRequest.java @@ -0,0 +1,30 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.slm; + +import org.elasticsearch.client.TimedRequest; + +public class GetSnapshotLifecycleStatsRequest extends TimedRequest { + + public GetSnapshotLifecycleStatsRequest() { + super(); + } + +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/slm/GetSnapshotLifecycleStatsResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/slm/GetSnapshotLifecycleStatsResponse.java new file mode 100644 index 00000000000..1aed51afc72 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/slm/GetSnapshotLifecycleStatsResponse.java @@ -0,0 +1,68 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.slm; + +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + +public class GetSnapshotLifecycleStatsResponse implements ToXContentObject { + + private final SnapshotLifecycleStats stats; + + public GetSnapshotLifecycleStatsResponse(SnapshotLifecycleStats stats) { + this.stats = stats; + } + + public SnapshotLifecycleStats getStats() { + return this.stats; + } + + public static GetSnapshotLifecycleStatsResponse fromXContent(XContentParser parser) throws IOException { + return new GetSnapshotLifecycleStatsResponse(SnapshotLifecycleStats.parse(parser)); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return stats.toXContent(builder, params); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + + if (o == null || getClass() != o.getClass()) { + return false; + } + + GetSnapshotLifecycleStatsResponse other = (GetSnapshotLifecycleStatsResponse) o; + return Objects.equals(this.stats, other.stats); + } + + @Override + public int hashCode() { + return Objects.hash(this.stats); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/slm/SnapshotLifecyclePolicy.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/slm/SnapshotLifecyclePolicy.java index 0370cb262f1..e9c521772a5 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/slm/SnapshotLifecyclePolicy.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/slm/SnapshotLifecyclePolicy.java @@ -38,11 +38,13 @@ public class SnapshotLifecyclePolicy implements ToXContentObject { private final String schedule; private final String repository; private final Map configuration; + private final SnapshotRetentionConfiguration retentionPolicy; private static final ParseField NAME = new ParseField("name"); private static final ParseField SCHEDULE = new ParseField("schedule"); private static final ParseField REPOSITORY = new ParseField("repository"); private static final ParseField CONFIG = new ParseField("config"); + private static final ParseField RETENTION = new ParseField("retention"); @SuppressWarnings("unchecked") private static final ConstructingObjectParser PARSER = @@ -52,7 +54,8 @@ public class SnapshotLifecyclePolicy implements ToXContentObject { String schedule = (String) a[1]; String repo = (String) a[2]; Map config = (Map) a[3]; - return new SnapshotLifecyclePolicy(id, name, schedule, repo, config); + SnapshotRetentionConfiguration retention = (SnapshotRetentionConfiguration) a[4]; + return new SnapshotLifecyclePolicy(id, name, schedule, repo, config, retention); }); static { @@ -60,15 +63,18 @@ public class SnapshotLifecyclePolicy implements ToXContentObject { PARSER.declareString(ConstructingObjectParser.constructorArg(), SCHEDULE); PARSER.declareString(ConstructingObjectParser.constructorArg(), REPOSITORY); PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), (p, c) -> p.map(), CONFIG); + PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), SnapshotRetentionConfiguration::parse, RETENTION); } public SnapshotLifecyclePolicy(final String id, final String name, final String schedule, - final String repository, @Nullable Map configuration) { - this.id = Objects.requireNonNull(id); - this.name = name; - 
this.schedule = schedule; - this.repository = repository; + final String repository, @Nullable final Map configuration, + @Nullable final SnapshotRetentionConfiguration retentionPolicy) { + this.id = Objects.requireNonNull(id, "policy id is required"); + this.name = Objects.requireNonNull(name, "policy snapshot name is required"); + this.schedule = Objects.requireNonNull(schedule, "policy schedule is required"); + this.repository = Objects.requireNonNull(repository, "policy snapshot repository is required"); this.configuration = configuration; + this.retentionPolicy = retentionPolicy; } public String getId() { @@ -92,6 +98,11 @@ public class SnapshotLifecyclePolicy implements ToXContentObject { return this.configuration; } + @Nullable + public SnapshotRetentionConfiguration getRetentionPolicy() { + return this.retentionPolicy; + } + public static SnapshotLifecyclePolicy parse(XContentParser parser, String id) { return PARSER.apply(parser, id); } @@ -105,13 +116,16 @@ public class SnapshotLifecyclePolicy implements ToXContentObject { if (this.configuration != null) { builder.field(CONFIG.getPreferredName(), this.configuration); } + if (this.retentionPolicy != null) { + builder.field(RETENTION.getPreferredName(), this.retentionPolicy); + } builder.endObject(); return builder; } @Override public int hashCode() { - return Objects.hash(id, name, schedule, repository, configuration); + return Objects.hash(id, name, schedule, repository, configuration, retentionPolicy); } @Override @@ -128,7 +142,8 @@ public class SnapshotLifecyclePolicy implements ToXContentObject { Objects.equals(name, other.name) && Objects.equals(schedule, other.schedule) && Objects.equals(repository, other.repository) && - Objects.equals(configuration, other.configuration); + Objects.equals(configuration, other.configuration) && + Objects.equals(retentionPolicy, other.retentionPolicy); } @Override diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/slm/SnapshotLifecyclePolicyMetadata.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/slm/SnapshotLifecyclePolicyMetadata.java index 9b967e8c33b..d459069a290 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/slm/SnapshotLifecyclePolicyMetadata.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/slm/SnapshotLifecyclePolicyMetadata.java @@ -42,6 +42,7 @@ public class SnapshotLifecyclePolicyMetadata implements ToXContentObject { static final ParseField NEXT_EXECUTION_MILLIS = new ParseField("next_execution_millis"); static final ParseField NEXT_EXECUTION = new ParseField("next_execution"); static final ParseField SNAPSHOT_IN_PROGRESS = new ParseField("in_progress"); + static final ParseField POLICY_STATS = new ParseField("stats"); private final SnapshotLifecyclePolicy policy; private final long version; @@ -53,6 +54,7 @@ public class SnapshotLifecyclePolicyMetadata implements ToXContentObject { private final SnapshotInvocationRecord lastFailure; @Nullable private final SnapshotInProgress snapshotInProgress; + private final SnapshotLifecycleStats.SnapshotPolicyStats policyStats; @SuppressWarnings("unchecked") public static final ConstructingObjectParser PARSER = @@ -65,8 +67,9 @@ public class SnapshotLifecyclePolicyMetadata implements ToXContentObject { SnapshotInvocationRecord lastFailure = (SnapshotInvocationRecord) a[4]; long nextExecution = (long) a[5]; SnapshotInProgress sip = (SnapshotInProgress) a[6]; - - return new SnapshotLifecyclePolicyMetadata(policy, version, modifiedDate, lastSuccess, 
lastFailure, nextExecution, sip); + SnapshotLifecycleStats.SnapshotPolicyStats stats = (SnapshotLifecycleStats.SnapshotPolicyStats) a[7]; + return new SnapshotLifecyclePolicyMetadata(policy, version, modifiedDate, lastSuccess, + lastFailure, nextExecution, sip, stats); }); static { @@ -77,6 +80,9 @@ public class SnapshotLifecyclePolicyMetadata implements ToXContentObject { PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), SnapshotInvocationRecord::parse, LAST_FAILURE); PARSER.declareLong(ConstructingObjectParser.constructorArg(), NEXT_EXECUTION_MILLIS); PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), SnapshotInProgress::parse, SNAPSHOT_IN_PROGRESS); + PARSER.declareObject(ConstructingObjectParser.constructorArg(), + (p, c) -> SnapshotLifecycleStats.SnapshotPolicyStats.parse(p, "policy"), POLICY_STATS); + } public static SnapshotLifecyclePolicyMetadata parse(XContentParser parser, String id) { @@ -86,7 +92,8 @@ public class SnapshotLifecyclePolicyMetadata implements ToXContentObject { public SnapshotLifecyclePolicyMetadata(SnapshotLifecyclePolicy policy, long version, long modifiedDate, SnapshotInvocationRecord lastSuccess, SnapshotInvocationRecord lastFailure, long nextExecution, - @Nullable SnapshotInProgress snapshotInProgress) { + @Nullable SnapshotInProgress snapshotInProgress, + SnapshotLifecycleStats.SnapshotPolicyStats policyStats) { this.policy = policy; this.version = version; this.modifiedDate = modifiedDate; @@ -94,6 +101,7 @@ public class SnapshotLifecyclePolicyMetadata implements ToXContentObject { this.lastFailure = lastFailure; this.nextExecution = nextExecution; this.snapshotInProgress = snapshotInProgress; + this.policyStats = policyStats; } public SnapshotLifecyclePolicy getPolicy() { @@ -124,6 +132,10 @@ public class SnapshotLifecyclePolicyMetadata implements ToXContentObject { return this.nextExecution; } + public SnapshotLifecycleStats.SnapshotPolicyStats getPolicyStats() { + return this.policyStats; + } + @Nullable public SnapshotInProgress getSnapshotInProgress() { return this.snapshotInProgress; @@ -145,13 +157,16 @@ public class SnapshotLifecyclePolicyMetadata implements ToXContentObject { if (snapshotInProgress != null) { builder.field(SNAPSHOT_IN_PROGRESS.getPreferredName(), snapshotInProgress); } + builder.startObject(POLICY_STATS.getPreferredName()); + this.policyStats.toXContent(builder, params); + builder.endObject(); builder.endObject(); return builder; } @Override public int hashCode() { - return Objects.hash(policy, version, modifiedDate, lastSuccess, lastFailure, nextExecution); + return Objects.hash(policy, version, modifiedDate, lastSuccess, lastFailure, nextExecution, policyStats); } @Override @@ -168,7 +183,8 @@ public class SnapshotLifecyclePolicyMetadata implements ToXContentObject { Objects.equals(modifiedDate, other.modifiedDate) && Objects.equals(lastSuccess, other.lastSuccess) && Objects.equals(lastFailure, other.lastFailure) && - Objects.equals(nextExecution, other.nextExecution); + Objects.equals(nextExecution, other.nextExecution) && + Objects.equals(policyStats, other.policyStats); } public static class SnapshotInProgress implements ToXContentObject { diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/slm/SnapshotLifecycleStats.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/slm/SnapshotLifecycleStats.java new file mode 100644 index 00000000000..fc54f74649b --- /dev/null +++ 
b/client/rest-high-level/src/main/java/org/elasticsearch/client/slm/SnapshotLifecycleStats.java @@ -0,0 +1,261 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.slm; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentFragment; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.function.Function; +import java.util.stream.Collectors; + +public class SnapshotLifecycleStats implements ToXContentObject { + + private final long retentionRunCount; + private final long retentionFailedCount; + private final long retentionTimedOut; + private final long retentionTimeMs; + private final Map policyStats; + + public static final ParseField RETENTION_RUNS = new ParseField("retention_runs"); + public static final ParseField RETENTION_FAILED = new ParseField("retention_failed"); + public static final ParseField RETENTION_TIMED_OUT = new ParseField("retention_timed_out"); + public static final ParseField RETENTION_TIME = new ParseField("retention_deletion_time"); + public static final ParseField RETENTION_TIME_MILLIS = new ParseField("retention_deletion_time_millis"); + public static final ParseField POLICY_STATS = new ParseField("policy_stats"); + public static final ParseField TOTAL_TAKEN = new ParseField("total_snapshots_taken"); + public static final ParseField TOTAL_FAILED = new ParseField("total_snapshots_failed"); + public static final ParseField TOTAL_DELETIONS = new ParseField("total_snapshots_deleted"); + public static final ParseField TOTAL_DELETION_FAILURES = new ParseField("total_snapshot_deletion_failures"); + + + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>("snapshot_policy_stats", true, + a -> { + long runs = (long) a[0]; + long failed = (long) a[1]; + long timedOut = (long) a[2]; + long timeMs = (long) a[3]; + Map policyStatsMap = ((List) a[4]).stream() + .collect(Collectors.toMap(m -> m.policyId, Function.identity())); + return new SnapshotLifecycleStats(runs, failed, timedOut, timeMs, policyStatsMap); + }); + + static { + PARSER.declareLong(ConstructingObjectParser.constructorArg(), RETENTION_RUNS); + PARSER.declareLong(ConstructingObjectParser.constructorArg(), RETENTION_FAILED); + PARSER.declareLong(ConstructingObjectParser.constructorArg(), 
RETENTION_TIMED_OUT); + PARSER.declareLong(ConstructingObjectParser.constructorArg(), RETENTION_TIME_MILLIS); + PARSER.declareNamedObjects(ConstructingObjectParser.constructorArg(), (p, c, n) -> SnapshotPolicyStats.parse(p, n), POLICY_STATS); + } + + // Package visible for testing + private SnapshotLifecycleStats(long retentionRuns, long retentionFailed, long retentionTimedOut, long retentionTimeMs, + Map<String, SnapshotPolicyStats> policyStats) { + this.retentionRunCount = retentionRuns; + this.retentionFailedCount = retentionFailed; + this.retentionTimedOut = retentionTimedOut; + this.retentionTimeMs = retentionTimeMs; + this.policyStats = policyStats; + } + + public static SnapshotLifecycleStats parse(XContentParser parser) { + return PARSER.apply(parser, null); + } + + public long getRetentionRunCount() { + return retentionRunCount; + } + + public long getRetentionFailedCount() { + return retentionFailedCount; + } + + public long getRetentionTimedOut() { + return retentionTimedOut; + } + + public long getRetentionTimeMillis() { + return retentionTimeMs; + } + + /** + * @return a map of per-policy stats for each SLM policy + */ + public Map<String, SnapshotPolicyStats> getMetrics() { + return Collections.unmodifiableMap(this.policyStats); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(RETENTION_RUNS.getPreferredName(), this.retentionRunCount); + builder.field(RETENTION_FAILED.getPreferredName(), this.retentionFailedCount); + builder.field(RETENTION_TIMED_OUT.getPreferredName(), this.retentionTimedOut); + TimeValue retentionTime = TimeValue.timeValueMillis(this.retentionTimeMs); + builder.field(RETENTION_TIME.getPreferredName(), retentionTime); + builder.field(RETENTION_TIME_MILLIS.getPreferredName(), retentionTime.millis()); + + Map<String, SnapshotPolicyStats> metrics = getMetrics(); + long totalTaken = metrics.values().stream().mapToLong(s -> s.snapshotsTaken).sum(); + long totalFailed = metrics.values().stream().mapToLong(s -> s.snapshotsFailed).sum(); + long totalDeleted = metrics.values().stream().mapToLong(s -> s.snapshotsDeleted).sum(); + long totalDeleteFailures = metrics.values().stream().mapToLong(s -> s.snapshotDeleteFailures).sum(); + builder.field(TOTAL_TAKEN.getPreferredName(), totalTaken); + builder.field(TOTAL_FAILED.getPreferredName(), totalFailed); + builder.field(TOTAL_DELETIONS.getPreferredName(), totalDeleted); + builder.field(TOTAL_DELETION_FAILURES.getPreferredName(), totalDeleteFailures); + builder.startObject(POLICY_STATS.getPreferredName()); + for (Map.Entry<String, SnapshotPolicyStats> policy : metrics.entrySet()) { + SnapshotPolicyStats perPolicyMetrics = policy.getValue(); + builder.startObject(perPolicyMetrics.policyId); + perPolicyMetrics.toXContent(builder, params); + builder.endObject(); + } + builder.endObject(); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(retentionRunCount, retentionFailedCount, retentionTimedOut, retentionTimeMs, policyStats); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + SnapshotLifecycleStats other = (SnapshotLifecycleStats) obj; + return retentionRunCount == other.retentionRunCount && + retentionFailedCount == other.retentionFailedCount && + retentionTimedOut == other.retentionTimedOut && + retentionTimeMs == other.retentionTimeMs && + Objects.equals(policyStats, other.policyStats); + } + + @Override + public String toString() { + return Strings.toString(this); + } +
+ public static class SnapshotPolicyStats implements ToXContentFragment { + private final String policyId; + private final long snapshotsTaken; + private final long snapshotsFailed; + private final long snapshotsDeleted; + private final long snapshotDeleteFailures; + + static final ParseField SNAPSHOTS_TAKEN = new ParseField("snapshots_taken"); + static final ParseField SNAPSHOTS_FAILED = new ParseField("snapshots_failed"); + static final ParseField SNAPSHOTS_DELETED = new ParseField("snapshots_deleted"); + static final ParseField SNAPSHOT_DELETION_FAILURES = new ParseField("snapshot_deletion_failures"); + + private static final ConstructingObjectParser<SnapshotPolicyStats, String> PARSER = + new ConstructingObjectParser<>("snapshot_policy_stats", true, + (a, id) -> { + long taken = (long) a[0]; + long failed = (long) a[1]; + long deleted = (long) a[2]; + long deleteFailed = (long) a[3]; + return new SnapshotPolicyStats(id, taken, failed, deleted, deleteFailed); + }); + + static { + PARSER.declareLong(ConstructingObjectParser.constructorArg(), SNAPSHOTS_TAKEN); + PARSER.declareLong(ConstructingObjectParser.constructorArg(), SNAPSHOTS_FAILED); + PARSER.declareLong(ConstructingObjectParser.constructorArg(), SNAPSHOTS_DELETED); + PARSER.declareLong(ConstructingObjectParser.constructorArg(), SNAPSHOT_DELETION_FAILURES); + } + + public SnapshotPolicyStats(String policyId, long snapshotsTaken, long snapshotsFailed, long deleted, long failedDeletes) { + this.policyId = policyId; + this.snapshotsTaken = snapshotsTaken; + this.snapshotsFailed = snapshotsFailed; + this.snapshotsDeleted = deleted; + this.snapshotDeleteFailures = failedDeletes; + } + + public static SnapshotPolicyStats parse(XContentParser parser, String policyId) { + return PARSER.apply(parser, policyId); + } + + public long getSnapshotsTaken() { + return snapshotsTaken; + } + + public long getSnapshotsFailed() { + return snapshotsFailed; + } + + public long getSnapshotsDeleted() { + return snapshotsDeleted; + } + + public long getSnapshotDeleteFailures() { + return snapshotDeleteFailures; + } + + @Override + public int hashCode() { + return Objects.hash(policyId, snapshotsTaken, snapshotsFailed, snapshotsDeleted, snapshotDeleteFailures); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + SnapshotPolicyStats other = (SnapshotPolicyStats) obj; + return Objects.equals(policyId, other.policyId) && + snapshotsTaken == other.snapshotsTaken && + snapshotsFailed == other.snapshotsFailed && + snapshotsDeleted == other.snapshotsDeleted && + snapshotDeleteFailures == other.snapshotDeleteFailures; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(SnapshotPolicyStats.SNAPSHOTS_TAKEN.getPreferredName(), snapshotsTaken); + builder.field(SnapshotPolicyStats.SNAPSHOTS_FAILED.getPreferredName(), snapshotsFailed); + builder.field(SnapshotPolicyStats.SNAPSHOTS_DELETED.getPreferredName(), snapshotsDeleted); + builder.field(SnapshotPolicyStats.SNAPSHOT_DELETION_FAILURES.getPreferredName(), snapshotDeleteFailures); + return builder; + } + } +}
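For context, here is a minimal usage sketch for the new client-side SnapshotLifecycleStats class added above. It is not part of the patch: the JSON payload is hand-written and only inferred from the parser declarations (retention_runs, retention_failed, retention_timed_out, retention_deletion_time_millis, and the named policy_stats objects), and the policy id "daily-snapshots" is an invented example.

import org.elasticsearch.client.slm.SnapshotLifecycleStats;
import org.elasticsearch.common.xcontent.DeprecationHandler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.json.JsonXContent;

public class SnapshotLifecycleStatsSketch {
    public static void main(String[] args) throws Exception {
        // Payload shape is an assumption based on the ConstructingObjectParser declarations above.
        String json = "{"
            + "\"retention_runs\": 13,"
            + "\"retention_failed\": 0,"
            + "\"retention_timed_out\": 0,"
            + "\"retention_deletion_time_millis\": 1250,"
            + "\"policy_stats\": {"
            + "  \"daily-snapshots\": {"
            + "    \"snapshots_taken\": 42,"
            + "    \"snapshots_failed\": 1,"
            + "    \"snapshots_deleted\": 20,"
            + "    \"snapshot_deletion_failures\": 0"
            + "  }"
            + "}"
            + "}";
        try (XContentParser parser = JsonXContent.jsonXContent.createParser(
                NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json)) {
            SnapshotLifecycleStats stats = SnapshotLifecycleStats.parse(parser);
            // Per-policy metrics are keyed by policy id; the totals are derived only during toXContent.
            long taken = stats.getMetrics().get("daily-snapshots").getSnapshotsTaken();
            System.out.println("retention runs=" + stats.getRetentionRunCount() + ", snapshots taken=" + taken);
        }
    }
}

Note that the aggregate fields (total_snapshots_taken and friends) are computed on the fly in toXContent rather than stored on the object.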
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/slm/SnapshotRetentionConfiguration.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/slm/SnapshotRetentionConfiguration.java new file mode 100644 index 00000000000..f98e61fef17 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/slm/SnapshotRetentionConfiguration.java @@ -0,0 +1,133 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.slm; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + +public class SnapshotRetentionConfiguration implements ToXContentObject { + + public static final SnapshotRetentionConfiguration EMPTY = new SnapshotRetentionConfiguration(null, null, null); + + private static final ParseField EXPIRE_AFTER = new ParseField("expire_after"); + private static final ParseField MINIMUM_SNAPSHOT_COUNT = new ParseField("min_count"); + private static final ParseField MAXIMUM_SNAPSHOT_COUNT = new ParseField("max_count"); + + private static final ConstructingObjectParser<SnapshotRetentionConfiguration, Void> PARSER = + new ConstructingObjectParser<>("snapshot_retention", true, a -> { + TimeValue expireAfter = a[0] == null ?
null : TimeValue.parseTimeValue((String) a[0], EXPIRE_AFTER.getPreferredName()); + Integer minCount = (Integer) a[1]; + Integer maxCount = (Integer) a[2]; + return new SnapshotRetentionConfiguration(expireAfter, minCount, maxCount); + }); + + static { + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), EXPIRE_AFTER); + PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), MINIMUM_SNAPSHOT_COUNT); + PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), MAXIMUM_SNAPSHOT_COUNT); + } + + private final TimeValue expireAfter; + private final Integer minimumSnapshotCount; + private final Integer maximumSnapshotCount; + + public SnapshotRetentionConfiguration(@Nullable TimeValue expireAfter, + @Nullable Integer minimumSnapshotCount, + @Nullable Integer maximumSnapshotCount) { + this.expireAfter = expireAfter; + this.minimumSnapshotCount = minimumSnapshotCount; + this.maximumSnapshotCount = maximumSnapshotCount; + if (this.minimumSnapshotCount != null && this.minimumSnapshotCount < 1) { + throw new IllegalArgumentException("minimum snapshot count must be at least 1, but was: " + this.minimumSnapshotCount); + } + if (this.maximumSnapshotCount != null && this.maximumSnapshotCount < 1) { + throw new IllegalArgumentException("maximum snapshot count must be at least 1, but was: " + this.maximumSnapshotCount); + } + if ((maximumSnapshotCount != null && minimumSnapshotCount != null) && this.minimumSnapshotCount > this.maximumSnapshotCount) { + throw new IllegalArgumentException("minimum snapshot count " + this.minimumSnapshotCount + + " cannot be larger than maximum snapshot count " + this.maximumSnapshotCount); + } + } + + public static SnapshotRetentionConfiguration parse(XContentParser parser, String name) { + return PARSER.apply(parser, null); + } + + public TimeValue getExpireAfter() { + return this.expireAfter; + } + + public Integer getMinimumSnapshotCount() { + return this.minimumSnapshotCount; + } + + public Integer getMaximumSnapshotCount() { + return this.maximumSnapshotCount; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (expireAfter != null) { + builder.field(EXPIRE_AFTER.getPreferredName(), expireAfter.getStringRep()); + } + if (minimumSnapshotCount != null) { + builder.field(MINIMUM_SNAPSHOT_COUNT.getPreferredName(), minimumSnapshotCount); + } + if (maximumSnapshotCount != null) { + builder.field(MAXIMUM_SNAPSHOT_COUNT.getPreferredName(), maximumSnapshotCount); + } + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(expireAfter, minimumSnapshotCount, maximumSnapshotCount); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + SnapshotRetentionConfiguration other = (SnapshotRetentionConfiguration) obj; + return Objects.equals(this.expireAfter, other.expireAfter) && + Objects.equals(minimumSnapshotCount, other.minimumSnapshotCount) && + Objects.equals(maximumSnapshotCount, other.maximumSnapshotCount); + } + + @Override + public String toString() { + return Strings.toString(this); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/AcknowledgedTasksResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/AcknowledgedTasksResponse.java similarity index 98% rename from 
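A similarly hedged sketch (again not part of the patch) for the new SnapshotRetentionConfiguration: the values below are invented, and the second construction only demonstrates the argument validation performed by the constructor shown in the hunk above.

import org.elasticsearch.client.slm.SnapshotRetentionConfiguration;
import org.elasticsearch.common.unit.TimeValue;

public class SnapshotRetentionConfigurationSketch {
    public static void main(String[] args) {
        // Keep snapshots for 30 days, but always retain at least 5 and at most 50.
        SnapshotRetentionConfiguration retention =
            new SnapshotRetentionConfiguration(TimeValue.timeValueDays(30), 5, 50);
        System.out.println(retention); // toString() renders the object as JSON via Strings.toString()

        // A min_count greater than max_count is rejected up front.
        try {
            new SnapshotRetentionConfiguration(null, 10, 2);
        } catch (IllegalArgumentException e) {
            System.out.println("rejected: " + e.getMessage());
        }
    }
}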
client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/AcknowledgedTasksResponse.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/transform/AcknowledgedTasksResponse.java index bccaf609d5f..32cfc5aee2b 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/AcknowledgedTasksResponse.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/AcknowledgedTasksResponse.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.client.dataframe; +package org.elasticsearch.client.transform; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.TaskOperationFailure; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/DataFrameNamedXContentProvider.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/DataFrameNamedXContentProvider.java similarity index 89% rename from client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/DataFrameNamedXContentProvider.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/transform/DataFrameNamedXContentProvider.java index 940b136c93d..bf4d5b4ade1 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/DataFrameNamedXContentProvider.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/DataFrameNamedXContentProvider.java @@ -17,10 +17,10 @@ * under the License. */ -package org.elasticsearch.client.dataframe; +package org.elasticsearch.client.transform; -import org.elasticsearch.client.dataframe.transforms.SyncConfig; -import org.elasticsearch.client.dataframe.transforms.TimeSyncConfig; +import org.elasticsearch.client.transform.transforms.SyncConfig; +import org.elasticsearch.client.transform.transforms.TimeSyncConfig; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.plugins.spi.NamedXContentProvider; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/DeleteDataFrameTransformRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/DeleteDataFrameTransformRequest.java similarity index 98% rename from client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/DeleteDataFrameTransformRequest.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/transform/DeleteDataFrameTransformRequest.java index 18323a7b2e4..d28779efd1d 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/DeleteDataFrameTransformRequest.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/DeleteDataFrameTransformRequest.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.client.dataframe; +package org.elasticsearch.client.transform; import org.elasticsearch.client.Validatable; import org.elasticsearch.client.ValidationException; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/GetDataFrameTransformRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/GetDataFrameTransformRequest.java similarity index 98% rename from client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/GetDataFrameTransformRequest.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/transform/GetDataFrameTransformRequest.java index cc69e0bd4cd..3c442b65010 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/GetDataFrameTransformRequest.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/GetDataFrameTransformRequest.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.client.dataframe; +package org.elasticsearch.client.transform; import org.elasticsearch.client.Validatable; import org.elasticsearch.client.ValidationException; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/GetDataFrameTransformResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/GetDataFrameTransformResponse.java similarity index 97% rename from client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/GetDataFrameTransformResponse.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/transform/GetDataFrameTransformResponse.java index 93fc91f08ce..e1ca2df503d 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/GetDataFrameTransformResponse.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/GetDataFrameTransformResponse.java @@ -17,9 +17,9 @@ * under the License. */ -package org.elasticsearch.client.dataframe; +package org.elasticsearch.client.transform; -import org.elasticsearch.client.dataframe.transforms.DataFrameTransformConfig; +import org.elasticsearch.client.transform.transforms.DataFrameTransformConfig; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.ConstructingObjectParser; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/GetDataFrameTransformStatsRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/GetDataFrameTransformStatsRequest.java similarity index 98% rename from client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/GetDataFrameTransformStatsRequest.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/transform/GetDataFrameTransformStatsRequest.java index 7522ae0d67c..579dd715cbc 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/GetDataFrameTransformStatsRequest.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/GetDataFrameTransformStatsRequest.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.client.dataframe; +package org.elasticsearch.client.transform; import org.elasticsearch.client.Validatable; import org.elasticsearch.client.ValidationException; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/GetDataFrameTransformStatsResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/GetDataFrameTransformStatsResponse.java similarity index 97% rename from client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/GetDataFrameTransformStatsResponse.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/transform/GetDataFrameTransformStatsResponse.java index fe4698257ea..a1c4fc81974 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/GetDataFrameTransformStatsResponse.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/GetDataFrameTransformStatsResponse.java @@ -17,11 +17,11 @@ * under the License. */ -package org.elasticsearch.client.dataframe; +package org.elasticsearch.client.transform; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.TaskOperationFailure; -import org.elasticsearch.client.dataframe.transforms.DataFrameTransformStats; +import org.elasticsearch.client.transform.transforms.DataFrameTransformStats; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.ConstructingObjectParser; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/PreviewDataFrameTransformRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/PreviewDataFrameTransformRequest.java similarity index 96% rename from client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/PreviewDataFrameTransformRequest.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/transform/PreviewDataFrameTransformRequest.java index 0e1cc1c71c8..ab06ebfa4c9 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/PreviewDataFrameTransformRequest.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/PreviewDataFrameTransformRequest.java @@ -17,11 +17,11 @@ * under the License. 
*/ -package org.elasticsearch.client.dataframe; +package org.elasticsearch.client.transform; import org.elasticsearch.client.Validatable; import org.elasticsearch.client.ValidationException; -import org.elasticsearch.client.dataframe.transforms.DataFrameTransformConfig; +import org.elasticsearch.client.transform.transforms.DataFrameTransformConfig; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/PreviewDataFrameTransformResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/PreviewDataFrameTransformResponse.java similarity index 98% rename from client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/PreviewDataFrameTransformResponse.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/transform/PreviewDataFrameTransformResponse.java index 40e87b5768b..9f7cd2b313a 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/PreviewDataFrameTransformResponse.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/PreviewDataFrameTransformResponse.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.client.dataframe; +package org.elasticsearch.client.transform; import org.elasticsearch.common.xcontent.XContentParser; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/PutDataFrameTransformRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/PutDataFrameTransformRequest.java similarity index 97% rename from client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/PutDataFrameTransformRequest.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/transform/PutDataFrameTransformRequest.java index 814414f04ee..e948ae53e0d 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/PutDataFrameTransformRequest.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/PutDataFrameTransformRequest.java @@ -17,11 +17,11 @@ * under the License. */ -package org.elasticsearch.client.dataframe; +package org.elasticsearch.client.transform; import org.elasticsearch.client.Validatable; import org.elasticsearch.client.ValidationException; -import org.elasticsearch.client.dataframe.transforms.DataFrameTransformConfig; +import org.elasticsearch.client.transform.transforms.DataFrameTransformConfig; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/StartDataFrameTransformRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/StartDataFrameTransformRequest.java similarity index 98% rename from client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/StartDataFrameTransformRequest.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/transform/StartDataFrameTransformRequest.java index 380105eec5f..208bebf58ab 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/StartDataFrameTransformRequest.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/StartDataFrameTransformRequest.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.client.dataframe; +package org.elasticsearch.client.transform; import org.elasticsearch.client.Validatable; import org.elasticsearch.client.ValidationException; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/StartDataFrameTransformResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/StartDataFrameTransformResponse.java similarity index 97% rename from client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/StartDataFrameTransformResponse.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/transform/StartDataFrameTransformResponse.java index 9b358ffdfa1..9a96d2de7ae 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/StartDataFrameTransformResponse.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/StartDataFrameTransformResponse.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.client.dataframe; +package org.elasticsearch.client.transform; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.TaskOperationFailure; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/StopDataFrameTransformRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/StopDataFrameTransformRequest.java similarity index 98% rename from client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/StopDataFrameTransformRequest.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/transform/StopDataFrameTransformRequest.java index 4fb6164f2cc..3a662c2caec 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/StopDataFrameTransformRequest.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/StopDataFrameTransformRequest.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.client.dataframe; +package org.elasticsearch.client.transform; import org.elasticsearch.client.Validatable; import org.elasticsearch.client.ValidationException; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/StopDataFrameTransformResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/StopDataFrameTransformResponse.java similarity index 97% rename from client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/StopDataFrameTransformResponse.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/transform/StopDataFrameTransformResponse.java index 6d32474f70c..3993679fba8 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/StopDataFrameTransformResponse.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/StopDataFrameTransformResponse.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.client.dataframe; +package org.elasticsearch.client.transform; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.TaskOperationFailure; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/UpdateDataFrameTransformRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/UpdateDataFrameTransformRequest.java similarity index 96% rename from client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/UpdateDataFrameTransformRequest.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/transform/UpdateDataFrameTransformRequest.java index f5fdb9edbb2..d4fe836db0f 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/UpdateDataFrameTransformRequest.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/UpdateDataFrameTransformRequest.java @@ -17,11 +17,11 @@ * under the License. */ -package org.elasticsearch.client.dataframe; +package org.elasticsearch.client.transform; import org.elasticsearch.client.Validatable; import org.elasticsearch.client.ValidationException; -import org.elasticsearch.client.dataframe.transforms.DataFrameTransformConfigUpdate; +import org.elasticsearch.client.transform.transforms.DataFrameTransformConfigUpdate; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/UpdateDataFrameTransformResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/UpdateDataFrameTransformResponse.java similarity index 94% rename from client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/UpdateDataFrameTransformResponse.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/transform/UpdateDataFrameTransformResponse.java index 30b47c27d59..2afc8f9f3f5 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/UpdateDataFrameTransformResponse.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/UpdateDataFrameTransformResponse.java @@ -17,9 +17,9 @@ * under the License. */ -package org.elasticsearch.client.dataframe; +package org.elasticsearch.client.transform; -import org.elasticsearch.client.dataframe.transforms.DataFrameTransformConfig; +import org.elasticsearch.client.transform.transforms.DataFrameTransformConfig; import org.elasticsearch.common.xcontent.XContentParser; import java.util.Objects; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameIndexerPosition.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/transforms/DataFrameIndexerPosition.java similarity index 98% rename from client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameIndexerPosition.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/transform/transforms/DataFrameIndexerPosition.java index 86a2527ffdd..6141f77c3b0 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameIndexerPosition.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/transforms/DataFrameIndexerPosition.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.client.dataframe.transforms; +package org.elasticsearch.client.transform.transforms; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.ConstructingObjectParser; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameIndexerTransformStats.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/transforms/DataFrameIndexerTransformStats.java similarity index 99% rename from client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameIndexerTransformStats.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/transform/transforms/DataFrameIndexerTransformStats.java index f184810609a..23a25c511b2 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameIndexerTransformStats.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/transforms/DataFrameIndexerTransformStats.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.client.dataframe.transforms; +package org.elasticsearch.client.transform.transforms; import org.elasticsearch.client.core.IndexerJobStats; import org.elasticsearch.common.ParseField; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformCheckpointStats.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/transforms/DataFrameTransformCheckpointStats.java similarity index 98% rename from client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformCheckpointStats.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/transform/transforms/DataFrameTransformCheckpointStats.java index 7c8d853768b..1f9606fe2dc 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformCheckpointStats.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/transforms/DataFrameTransformCheckpointStats.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.client.dataframe.transforms; +package org.elasticsearch.client.transform.transforms; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.ConstructingObjectParser; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformCheckpointingInfo.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/transforms/DataFrameTransformCheckpointingInfo.java similarity index 97% rename from client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformCheckpointingInfo.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/transform/transforms/DataFrameTransformCheckpointingInfo.java index 659863c3cf3..79d02c523ff 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformCheckpointingInfo.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/transforms/DataFrameTransformCheckpointingInfo.java @@ -17,9 +17,9 @@ * under the License. 
*/ -package org.elasticsearch.client.dataframe.transforms; +package org.elasticsearch.client.transform.transforms; -import org.elasticsearch.client.dataframe.transforms.util.TimeUtil; +import org.elasticsearch.client.transform.transforms.util.TimeUtil; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.ConstructingObjectParser; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformConfig.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/transforms/DataFrameTransformConfig.java similarity index 98% rename from client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformConfig.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/transform/transforms/DataFrameTransformConfig.java index 2810d6a8cfa..d3abc73e6f3 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformConfig.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/transforms/DataFrameTransformConfig.java @@ -17,11 +17,11 @@ * under the License. */ -package org.elasticsearch.client.dataframe.transforms; +package org.elasticsearch.client.transform.transforms; import org.elasticsearch.Version; -import org.elasticsearch.client.dataframe.transforms.pivot.PivotConfig; -import org.elasticsearch.client.dataframe.transforms.util.TimeUtil; +import org.elasticsearch.client.transform.transforms.pivot.PivotConfig; +import org.elasticsearch.client.transform.transforms.util.TimeUtil; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformConfigUpdate.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/transforms/DataFrameTransformConfigUpdate.java similarity index 99% rename from client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformConfigUpdate.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/transform/transforms/DataFrameTransformConfigUpdate.java index afca172b564..945e8b82116 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformConfigUpdate.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/transforms/DataFrameTransformConfigUpdate.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.client.dataframe.transforms; +package org.elasticsearch.client.transform.transforms; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformProgress.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/transforms/DataFrameTransformProgress.java similarity index 98% rename from client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformProgress.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/transform/transforms/DataFrameTransformProgress.java index 96920651035..73eacac8513 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformProgress.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/transforms/DataFrameTransformProgress.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.client.dataframe.transforms; +package org.elasticsearch.client.transform.transforms; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformStats.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/transforms/DataFrameTransformStats.java similarity index 98% rename from client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformStats.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/transform/transforms/DataFrameTransformStats.java index 578bed0d37f..ccf2a18fb34 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformStats.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/transforms/DataFrameTransformStats.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.client.dataframe.transforms; +package org.elasticsearch.client.transform.transforms; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.ConstructingObjectParser; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DestConfig.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/transforms/DestConfig.java similarity index 98% rename from client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DestConfig.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/transform/transforms/DestConfig.java index f808fa86720..9dce70efe23 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DestConfig.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/transforms/DestConfig.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.client.dataframe.transforms; +package org.elasticsearch.client.transform.transforms; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.ConstructingObjectParser; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/NodeAttributes.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/transforms/NodeAttributes.java similarity index 98% rename from client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/NodeAttributes.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/transform/transforms/NodeAttributes.java index 85c2b9644c2..a4b2a6f1fd9 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/NodeAttributes.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/transforms/NodeAttributes.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.client.dataframe.transforms; +package org.elasticsearch.client.transform.transforms; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/QueryConfig.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/transforms/QueryConfig.java similarity index 97% rename from client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/QueryConfig.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/transform/transforms/QueryConfig.java index 31b1fe48108..3f4727863a2 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/QueryConfig.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/transforms/QueryConfig.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.client.dataframe.transforms; +package org.elasticsearch.client.transform.transforms; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/SourceConfig.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/transforms/SourceConfig.java similarity index 99% rename from client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/SourceConfig.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/transform/transforms/SourceConfig.java index 7cafa5a8905..fa72bc32391 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/SourceConfig.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/transforms/SourceConfig.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.client.dataframe.transforms; +package org.elasticsearch.client.transform.transforms; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.ConstructingObjectParser; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/SyncConfig.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/transforms/SyncConfig.java similarity index 94% rename from client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/SyncConfig.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/transform/transforms/SyncConfig.java index 3ead35d0a49..c21418f3f75 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/SyncConfig.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/transforms/SyncConfig.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.client.dataframe.transforms; +package org.elasticsearch.client.transform.transforms; import org.elasticsearch.common.xcontent.ToXContentObject; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/TimeSyncConfig.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/transforms/TimeSyncConfig.java similarity index 98% rename from client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/TimeSyncConfig.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/transform/transforms/TimeSyncConfig.java index 797ca3f8961..a31006df5e7 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/TimeSyncConfig.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/transforms/TimeSyncConfig.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.client.dataframe.transforms; +package org.elasticsearch.client.transform.transforms; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.unit.TimeValue; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/pivot/AggregationConfig.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/transforms/pivot/AggregationConfig.java similarity index 97% rename from client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/pivot/AggregationConfig.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/transform/transforms/pivot/AggregationConfig.java index ceb5f1e1247..ed87cb20ab7 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/pivot/AggregationConfig.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/transforms/pivot/AggregationConfig.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.client.dataframe.transforms.pivot; +package org.elasticsearch.client.transform.transforms.pivot; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/pivot/DateHistogramGroupSource.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/transforms/pivot/DateHistogramGroupSource.java similarity index 99% rename from client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/pivot/DateHistogramGroupSource.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/transform/transforms/pivot/DateHistogramGroupSource.java index c8fb885896d..6c775142539 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/pivot/DateHistogramGroupSource.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/transforms/pivot/DateHistogramGroupSource.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.client.dataframe.transforms.pivot; +package org.elasticsearch.client.transform.transforms.pivot; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.ConstructingObjectParser; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/pivot/GroupConfig.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/transforms/pivot/GroupConfig.java similarity index 99% rename from client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/pivot/GroupConfig.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/transform/transforms/pivot/GroupConfig.java index 950d35e4054..22ee9682998 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/pivot/GroupConfig.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/transforms/pivot/GroupConfig.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.client.dataframe.transforms.pivot; +package org.elasticsearch.client.transform.transforms.pivot; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/pivot/HistogramGroupSource.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/transforms/pivot/HistogramGroupSource.java similarity index 98% rename from client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/pivot/HistogramGroupSource.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/transform/transforms/pivot/HistogramGroupSource.java index 390e4c8b64a..c3e9f53d8d7 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/pivot/HistogramGroupSource.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/transforms/pivot/HistogramGroupSource.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.client.dataframe.transforms.pivot; +package org.elasticsearch.client.transform.transforms.pivot; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.ConstructingObjectParser; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/pivot/PivotConfig.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/transforms/pivot/PivotConfig.java similarity index 99% rename from client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/pivot/PivotConfig.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/transform/transforms/pivot/PivotConfig.java index 6fdbeb8a43a..1f01ebd0b30 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/pivot/PivotConfig.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/transforms/pivot/PivotConfig.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.client.dataframe.transforms.pivot; +package org.elasticsearch.client.transform.transforms.pivot; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.ConstructingObjectParser; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/pivot/SingleGroupSource.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/transforms/pivot/SingleGroupSource.java similarity index 96% rename from client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/pivot/SingleGroupSource.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/transform/transforms/pivot/SingleGroupSource.java index b1234277d8c..abd03620c87 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/pivot/SingleGroupSource.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/transforms/pivot/SingleGroupSource.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.client.dataframe.transforms.pivot; +package org.elasticsearch.client.transform.transforms.pivot; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.ToXContentObject; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/pivot/TermsGroupSource.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/transforms/pivot/TermsGroupSource.java similarity index 97% rename from client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/pivot/TermsGroupSource.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/transform/transforms/pivot/TermsGroupSource.java index d1bacdbc0a6..885d86e7eeb 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/pivot/TermsGroupSource.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/transforms/pivot/TermsGroupSource.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.client.dataframe.transforms.pivot; +package org.elasticsearch.client.transform.transforms.pivot; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ToXContent; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/util/TimeUtil.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/transforms/util/TimeUtil.java similarity index 97% rename from client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/util/TimeUtil.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/transform/transforms/util/TimeUtil.java index 2470c3f7a4a..e2d72f91e55 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/util/TimeUtil.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/transform/transforms/util/TimeUtil.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.client.dataframe.transforms.util; +package org.elasticsearch.client.transform.transforms.util; import org.elasticsearch.common.time.DateFormatters; import org.elasticsearch.common.xcontent.XContentParser; diff --git a/client/rest-high-level/src/main/resources/META-INF/services/org.elasticsearch.plugins.spi.NamedXContentProvider b/client/rest-high-level/src/main/resources/META-INF/services/org.elasticsearch.plugins.spi.NamedXContentProvider index dde81e43867..d558383dd14 100644 --- a/client/rest-high-level/src/main/resources/META-INF/services/org.elasticsearch.plugins.spi.NamedXContentProvider +++ b/client/rest-high-level/src/main/resources/META-INF/services/org.elasticsearch.plugins.spi.NamedXContentProvider @@ -1,4 +1,4 @@ -org.elasticsearch.client.dataframe.DataFrameNamedXContentProvider org.elasticsearch.client.indexlifecycle.IndexLifecycleNamedXContentProvider org.elasticsearch.client.ml.dataframe.MlDataFrameAnalysisNamedXContentProvider -org.elasticsearch.client.ml.dataframe.evaluation.MlEvaluationNamedXContentProvider \ No newline at end of file +org.elasticsearch.client.ml.dataframe.evaluation.MlEvaluationNamedXContentProvider +org.elasticsearch.client.transform.DataFrameNamedXContentProvider diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/AbstractRequestTestCase.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/AbstractRequestTestCase.java index 5436cdf1c37..2d5cb843706 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/AbstractRequestTestCase.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/AbstractRequestTestCase.java @@ -49,13 +49,20 @@ public abstract class AbstractRequestTestCase extends E final XContent xContent = XContentFactory.xContent(xContentType); final XContentParser parser = xContent.createParser( - NamedXContentRegistry.EMPTY, + xContentRegistry(), LoggingDeprecationHandler.INSTANCE, bytes.streamInput()); final S serverInstance = doParseToServerInstance(parser); assertInstances(serverInstance, clientTestInstance); } + /** + * The {@link NamedXContentRegistry} to use for this test. Subclasses may override this to have a more realistic registry. 
+ */ + protected NamedXContentRegistry xContentRegistry() { + return NamedXContentRegistry.EMPTY; + } + /** * @return The client test instance to be serialized to xcontent as bytes */ diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameRequestConvertersTests.java index 207d6c13d58..24d91922980 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameRequestConvertersTests.java @@ -24,19 +24,19 @@ import org.apache.http.client.methods.HttpGet; import org.apache.http.client.methods.HttpPost; import org.apache.http.client.methods.HttpPut; import org.elasticsearch.client.core.PageParams; -import org.elasticsearch.client.dataframe.DataFrameNamedXContentProvider; -import org.elasticsearch.client.dataframe.DeleteDataFrameTransformRequest; -import org.elasticsearch.client.dataframe.GetDataFrameTransformRequest; -import org.elasticsearch.client.dataframe.GetDataFrameTransformStatsRequest; -import org.elasticsearch.client.dataframe.PreviewDataFrameTransformRequest; -import org.elasticsearch.client.dataframe.PutDataFrameTransformRequest; -import org.elasticsearch.client.dataframe.StartDataFrameTransformRequest; -import org.elasticsearch.client.dataframe.StopDataFrameTransformRequest; -import org.elasticsearch.client.dataframe.UpdateDataFrameTransformRequest; -import org.elasticsearch.client.dataframe.transforms.DataFrameTransformConfig; -import org.elasticsearch.client.dataframe.transforms.DataFrameTransformConfigTests; -import org.elasticsearch.client.dataframe.transforms.DataFrameTransformConfigUpdate; -import org.elasticsearch.client.dataframe.transforms.DataFrameTransformConfigUpdateTests; +import org.elasticsearch.client.transform.DataFrameNamedXContentProvider; +import org.elasticsearch.client.transform.DeleteDataFrameTransformRequest; +import org.elasticsearch.client.transform.GetDataFrameTransformRequest; +import org.elasticsearch.client.transform.GetDataFrameTransformStatsRequest; +import org.elasticsearch.client.transform.PreviewDataFrameTransformRequest; +import org.elasticsearch.client.transform.PutDataFrameTransformRequest; +import org.elasticsearch.client.transform.StartDataFrameTransformRequest; +import org.elasticsearch.client.transform.StopDataFrameTransformRequest; +import org.elasticsearch.client.transform.UpdateDataFrameTransformRequest; +import org.elasticsearch.client.transform.transforms.DataFrameTransformConfig; +import org.elasticsearch.client.transform.transforms.DataFrameTransformConfigTests; +import org.elasticsearch.client.transform.transforms.DataFrameTransformConfigUpdate; +import org.elasticsearch.client.transform.transforms.DataFrameTransformConfigUpdateTests; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.NamedXContentRegistry; @@ -49,7 +49,7 @@ import java.io.IOException; import java.util.Collections; import java.util.List; -import static org.elasticsearch.client.dataframe.GetDataFrameTransformRequest.ALLOW_NO_MATCH; +import static org.elasticsearch.client.transform.GetDataFrameTransformRequest.ALLOW_NO_MATCH; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasEntry; diff --git 
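The overridable xContentRegistry() hook added to AbstractRequestTestCase above exists so that request tests whose server-side parsing relies on named XContent (for example the transform SyncConfig entries) can supply a richer registry than NamedXContentRegistry.EMPTY. The helper below is an illustrative sketch rather than code from this patch; the class name is hypothetical, and it assumes only the standard SearchModule and NamedXContentProvider APIs.

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

import org.elasticsearch.client.transform.DataFrameNamedXContentProvider;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.search.SearchModule;

public final class TransformXContentRegistrySketch {

    private TransformXContentRegistrySketch() {}

    // Default search entries plus the transform named-xcontent entries; a request test
    // subclass could return this from its xContentRegistry() override.
    public static NamedXContentRegistry transformRegistry() {
        List<NamedXContentRegistry.Entry> entries = new ArrayList<>(
            new SearchModule(Settings.EMPTY, false, Collections.emptyList()).getNamedXContents());
        entries.addAll(new DataFrameNamedXContentProvider().getNamedXContentParsers());
        return new NamedXContentRegistry(entries);
    }
}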
a/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameTransformIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameTransformIT.java index 7633aef964a..9b952a27ba0 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameTransformIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameTransformIT.java @@ -26,32 +26,32 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.client.core.AcknowledgedResponse; import org.elasticsearch.client.core.PageParams; -import org.elasticsearch.client.dataframe.DeleteDataFrameTransformRequest; -import org.elasticsearch.client.dataframe.GetDataFrameTransformRequest; -import org.elasticsearch.client.dataframe.GetDataFrameTransformResponse; -import org.elasticsearch.client.dataframe.GetDataFrameTransformStatsRequest; -import org.elasticsearch.client.dataframe.GetDataFrameTransformStatsResponse; -import org.elasticsearch.client.dataframe.PreviewDataFrameTransformRequest; -import org.elasticsearch.client.dataframe.PreviewDataFrameTransformResponse; -import org.elasticsearch.client.dataframe.PutDataFrameTransformRequest; -import org.elasticsearch.client.dataframe.StartDataFrameTransformRequest; -import org.elasticsearch.client.dataframe.StartDataFrameTransformResponse; -import org.elasticsearch.client.dataframe.StopDataFrameTransformRequest; -import org.elasticsearch.client.dataframe.StopDataFrameTransformResponse; -import org.elasticsearch.client.dataframe.UpdateDataFrameTransformRequest; -import org.elasticsearch.client.dataframe.UpdateDataFrameTransformResponse; -import org.elasticsearch.client.dataframe.transforms.DataFrameIndexerTransformStats; -import org.elasticsearch.client.dataframe.transforms.DataFrameTransformConfig; -import org.elasticsearch.client.dataframe.transforms.DataFrameTransformConfigUpdate; -import org.elasticsearch.client.dataframe.transforms.DataFrameTransformStats; -import org.elasticsearch.client.dataframe.transforms.DestConfig; -import org.elasticsearch.client.dataframe.transforms.SourceConfig; -import org.elasticsearch.client.dataframe.transforms.TimeSyncConfig; -import org.elasticsearch.client.dataframe.transforms.pivot.GroupConfig; -import org.elasticsearch.client.dataframe.transforms.pivot.PivotConfig; -import org.elasticsearch.client.dataframe.transforms.pivot.TermsGroupSource; import org.elasticsearch.client.indices.CreateIndexRequest; import org.elasticsearch.client.indices.CreateIndexResponse; +import org.elasticsearch.client.transform.DeleteDataFrameTransformRequest; +import org.elasticsearch.client.transform.GetDataFrameTransformRequest; +import org.elasticsearch.client.transform.GetDataFrameTransformResponse; +import org.elasticsearch.client.transform.GetDataFrameTransformStatsRequest; +import org.elasticsearch.client.transform.GetDataFrameTransformStatsResponse; +import org.elasticsearch.client.transform.PreviewDataFrameTransformRequest; +import org.elasticsearch.client.transform.PreviewDataFrameTransformResponse; +import org.elasticsearch.client.transform.PutDataFrameTransformRequest; +import org.elasticsearch.client.transform.StartDataFrameTransformRequest; +import org.elasticsearch.client.transform.StartDataFrameTransformResponse; +import org.elasticsearch.client.transform.StopDataFrameTransformRequest; +import org.elasticsearch.client.transform.StopDataFrameTransformResponse; +import 
org.elasticsearch.client.transform.UpdateDataFrameTransformRequest; +import org.elasticsearch.client.transform.UpdateDataFrameTransformResponse; +import org.elasticsearch.client.transform.transforms.DataFrameIndexerTransformStats; +import org.elasticsearch.client.transform.transforms.DataFrameTransformConfig; +import org.elasticsearch.client.transform.transforms.DataFrameTransformConfigUpdate; +import org.elasticsearch.client.transform.transforms.DataFrameTransformStats; +import org.elasticsearch.client.transform.transforms.DestConfig; +import org.elasticsearch.client.transform.transforms.SourceConfig; +import org.elasticsearch.client.transform.transforms.TimeSyncConfig; +import org.elasticsearch.client.transform.transforms.pivot.GroupConfig; +import org.elasticsearch.client.transform.transforms.pivot.PivotConfig; +import org.elasticsearch.client.transform.transforms.pivot.TermsGroupSource; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentType; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java index 0704aa9a186..f798d1ec422 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java @@ -167,20 +167,20 @@ import java.util.Map; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; -import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.CoreMatchers.hasItem; -import static org.hamcrest.CoreMatchers.hasItems; -import static org.hamcrest.CoreMatchers.not; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.closeTo; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.hasItems; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.lessThan; +import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.notNullValue; public class MachineLearningIT extends ESRestHighLevelClientTestCase { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ReindexIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ReindexIT.java index 73cca7827e7..90cfa3a9388 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ReindexIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ReindexIT.java @@ -292,7 +292,7 @@ public class ReindexIT extends ESRestHighLevelClientTestCase { assertThat(response.getTasks().get(0).getStatus(), instanceOf(RawTaskStatus.class)); assertEquals(Float.toString(requestsPerSecond), ((RawTaskStatus) response.getTasks().get(0).getStatus()).toMap().get("requests_per_second").toString()); - taskFinished.await(2, TimeUnit.SECONDS); + assertTrue(taskFinished.await(10, TimeUnit.SECONDS)); // any rethrottling after the update-by-query is done performed with the same taskId should result in a failure response = execute(new RethrottleRequest(taskIdToRethrottle, requestsPerSecond), @@ -423,7 
+423,7 @@ public class ReindexIT extends ESRestHighLevelClientTestCase { assertThat(response.getTasks().get(0).getStatus(), instanceOf(RawTaskStatus.class)); assertEquals(Float.toString(requestsPerSecond), ((RawTaskStatus) response.getTasks().get(0).getStatus()).toMap().get("requests_per_second").toString()); - taskFinished.await(2, TimeUnit.SECONDS); + assertTrue(taskFinished.await(10, TimeUnit.SECONDS)); // any rethrottling after the delete-by-query is done performed with the same taskId should result in a failure response = execute(new RethrottleRequest(taskIdToRethrottle, requestsPerSecond), diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java index d328200d630..57f6a579c70 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java @@ -74,6 +74,7 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.query.MatchAllQueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.TermQueryBuilder; import org.elasticsearch.index.rankeval.PrecisionAtK; @@ -1194,13 +1195,12 @@ public class RequestConvertersTests extends ESTestCase { setRandomCountParams(countRequest, expectedParams); setRandomIndicesOptions(countRequest::indicesOptions, countRequest::indicesOptions, expectedParams); - SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); - if (frequently()) { - if (randomBoolean()) { - searchSourceBuilder.minScore(randomFloat()); - } + if (randomBoolean()) { + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); + countRequest.source(searchSourceBuilder); + } else { + countRequest.query(new MatchAllQueryBuilder()); } - countRequest.source(searchSourceBuilder); Request request = RequestConverters.count(countRequest); StringJoiner endpoint = new StringJoiner("/", "/", ""); String index = String.join(",", indices); @@ -1215,7 +1215,7 @@ public class RequestConvertersTests extends ESTestCase { assertEquals(HttpPost.METHOD_NAME, request.getMethod()); assertEquals(endpoint.toString(), request.getEndpoint()); assertEquals(expectedParams, request.getParameters()); - assertToXContentBody(searchSourceBuilder, request.getEntity()); + assertToXContentBody(countRequest, request.getEntity()); } public void testCountNullIndicesAndTypes() { @@ -1234,6 +1234,14 @@ public class RequestConvertersTests extends ESTestCase { countRequest.preference(randomAlphaOfLengthBetween(3, 10)); expectedParams.put("preference", countRequest.preference()); } + if (randomBoolean()) { + countRequest.terminateAfter(randomIntBetween(0, Integer.MAX_VALUE)); + expectedParams.put("terminate_after", String.valueOf(countRequest.terminateAfter())); + } + if (randomBoolean()) { + countRequest.minScore((float) randomIntBetween(1, 10)); + expectedParams.put("min_score", String.valueOf(countRequest.minScore())); + } } public void testMultiSearch() throws IOException { @@ -1407,12 +1415,20 @@ public class RequestConvertersTests extends ESTestCase { multiSearchTemplateRequest.add(searchTemplateRequest); } + Map expectedParams = new HashMap<>(); + if (randomBoolean()) { + 
multiSearchTemplateRequest.maxConcurrentSearchRequests(randomIntBetween(1,10)); + expectedParams.put("max_concurrent_searches", Integer.toString(multiSearchTemplateRequest.maxConcurrentSearchRequests())); + } + expectedParams.put(RestSearchAction.TYPED_KEYS_PARAM, "true"); + Request multiRequest = RequestConverters.multiSearchTemplate(multiSearchTemplateRequest); assertEquals(HttpPost.METHOD_NAME, multiRequest.getMethod()); assertEquals("/_msearch/template", multiRequest.getEndpoint()); List searchRequests = multiSearchTemplateRequest.requests(); assertEquals(numSearchRequests, searchRequests.size()); + assertEquals(expectedParams, multiRequest.getParameters()); HttpEntity actualEntity = multiRequest.getEntity(); byte[] expectedBytes = MultiSearchTemplateRequest.writeMultiLineFormat(multiSearchTemplateRequest, XContentType.JSON.xContent()); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java index b1d5aa05925..f9bf69e865e 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java @@ -45,8 +45,6 @@ import org.elasticsearch.action.search.SearchScrollRequest; import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.client.core.MainRequest; import org.elasticsearch.client.core.MainResponse; -import org.elasticsearch.client.dataframe.transforms.SyncConfig; -import org.elasticsearch.client.dataframe.transforms.TimeSyncConfig; import org.elasticsearch.client.indexlifecycle.AllocateAction; import org.elasticsearch.client.indexlifecycle.DeleteAction; import org.elasticsearch.client.indexlifecycle.ForceMergeAction; @@ -67,6 +65,8 @@ import org.elasticsearch.client.ml.dataframe.evaluation.softclassification.Binar import org.elasticsearch.client.ml.dataframe.evaluation.softclassification.ConfusionMatrixMetric; import org.elasticsearch.client.ml.dataframe.evaluation.softclassification.PrecisionMetric; import org.elasticsearch.client.ml.dataframe.evaluation.softclassification.RecallMetric; +import org.elasticsearch.client.transform.transforms.SyncConfig; +import org.elasticsearch.client.transform.transforms.TimeSyncConfig; import org.elasticsearch.common.CheckedFunction; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; @@ -901,7 +901,7 @@ public class RestHighLevelClientTests extends ESTestCase { private static void assertAsyncMethod(Map> methods, Method method, String apiName) { assertTrue("async method [" + method.getName() + "] doesn't have corresponding sync method", methods.containsKey(apiName.substring(0, apiName.length() - 6))); - assertThat("async method [" + method + "] should return void", method.getReturnType(), equalTo(Void.TYPE)); + assertThat("async method [" + method + "] should return Cancellable", method.getReturnType(), equalTo(Cancellable.class)); assertEquals("async method [" + method + "] should not throw any exceptions", 0, method.getExceptionTypes().length); if (APIS_WITHOUT_REQUEST_OBJECT.contains(apiName.replaceAll("_async$", ""))) { assertEquals(2, method.getParameterTypes().length); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java index 171a0cae9da..cf0772d8170 100644 --- 
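The assertion change above reflects that the high-level client's *Async methods now return Cancellable rather than void. An illustrative fragment of what that enables for callers (client, searchRequest and listener are assumed to already exist):

    // The handle returned by any *Async call can be used to abort the request
    // if the response is no longer needed.
    Cancellable cancellable = client.searchAsync(searchRequest, RequestOptions.DEFAULT, listener);
    // ... later, once the result is irrelevant:
    cancellable.cancel();
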
a/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java @@ -45,6 +45,7 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.query.MatchAllQueryBuilder; import org.elasticsearch.index.query.MatchQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.ScriptQueryBuilder; import org.elasticsearch.index.query.TermsQueryBuilder; @@ -1352,9 +1353,14 @@ public class SearchIT extends ESRestHighLevelClientTestCase { } public void testCountMultipleIndicesMatchQueryUsingConstructor() throws IOException { - - SearchSourceBuilder sourceBuilder = new SearchSourceBuilder().query(new MatchQueryBuilder("field", "value1")); - CountRequest countRequest = new CountRequest(new String[]{"index1", "index2", "index3"}, sourceBuilder); + CountRequest countRequest; + if (randomBoolean()) { + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder().query(new MatchQueryBuilder("field", "value1")); + countRequest = new CountRequest(new String[]{"index1", "index2", "index3"}, sourceBuilder); + } else { + QueryBuilder query = new MatchQueryBuilder("field", "value1"); + countRequest = new CountRequest(new String[]{"index1", "index2", "index3"}, query); + } CountResponse countResponse = execute(countRequest, highLevelClient()::count, highLevelClient()::countAsync); assertCountHeader(countResponse); assertEquals(3, countResponse.getCount()); @@ -1362,9 +1368,12 @@ public class SearchIT extends ESRestHighLevelClientTestCase { } public void testCountMultipleIndicesMatchQuery() throws IOException { - CountRequest countRequest = new CountRequest("index1", "index2", "index3"); - countRequest.source(new SearchSourceBuilder().query(new MatchQueryBuilder("field", "value1"))); + if (randomBoolean()) { + countRequest.source(new SearchSourceBuilder().query(new MatchQueryBuilder("field", "value1"))); + } else { + countRequest.query(new MatchQueryBuilder("field", "value1")); + } CountResponse countResponse = execute(countRequest, highLevelClient()::count, highLevelClient()::countAsync); assertCountHeader(countResponse); assertEquals(3, countResponse.getCount()); @@ -1378,7 +1387,7 @@ public class SearchIT extends ESRestHighLevelClientTestCase { assertCountHeader(countResponse); assertEquals(3, countResponse.getCount()); } - + public void testSearchWithBasicLicensedQuery() throws IOException { SearchRequest searchRequest = new SearchRequest("index"); SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); @@ -1390,7 +1399,7 @@ public class SearchIT extends ESRestHighLevelClientTestCase { assertFirstHit(searchResponse, hasId("2")); assertSecondHit(searchResponse, hasId("1")); } - + private static void assertCountHeader(CountResponse countResponse) { assertEquals(0, countResponse.getSkippedShards()); assertEquals(0, countResponse.getFailedShards()); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/core/CountRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/core/CountRequestTests.java index 1030f4401e1..11821755da6 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/core/CountRequestTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/core/CountRequestTests.java @@ -20,18 +20,56 @@ package org.elasticsearch.client.core; 
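The count-request test changes in this section exercise the new ability to set a top-level query, terminate_after and min_score directly on CountRequest instead of always going through a SearchSourceBuilder. A short usage sketch under those assumptions (index and field names are placeholders):

    CountRequest viaSource = new CountRequest("index1");
    viaSource.source(new SearchSourceBuilder().query(new MatchQueryBuilder("field", "value1")));

    CountRequest viaQuery = new CountRequest("index1");
    viaQuery.query(new MatchQueryBuilder("field", "value1"));
    viaQuery.terminateAfter(100); // sent as the terminate_after request parameter
    viaQuery.minScore(1.0f);      // sent as the min_score request parameter
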
import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.client.AbstractRequestTestCase; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.ArrayUtils; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.query.MatchAllQueryBuilder; import org.elasticsearch.index.query.MatchQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.rest.action.RestActions; +import org.elasticsearch.search.SearchModule; import org.elasticsearch.search.builder.SearchSourceBuilder; -import org.elasticsearch.test.ESTestCase; +import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.List; import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode; +import static org.hamcrest.Matchers.equalTo; -//similar to SearchRequestTests as CountRequest inline several members (and functionality) from SearchRequest -public class CountRequestTests extends ESTestCase { +// similar to SearchRequestTests as CountRequest inline several members (and functionality) from SearchRequest +// In RestCountAction the request body is parsed as QueryBuilder (top level query field), +// so that is why this is chosen as server side instance. +public class CountRequestTests extends AbstractRequestTestCase { + + @Override + protected CountRequest createClientTestInstance() { + CountRequest countRequest = new CountRequest(); + // query is the only property that is serialized as xcontent: + if (randomBoolean()) { + countRequest.query(new MatchAllQueryBuilder()); + } + return countRequest; + } + + @Override + protected QueryBuilder doParseToServerInstance(XContentParser parser) throws IOException { + return RestActions.getQueryContent(parser); + } + + @Override + protected void assertInstances(QueryBuilder serverInstance, CountRequest clientTestInstance) { + // query is the only property that is serialized as xcontent: + assertThat(serverInstance, equalTo(clientTestInstance.query())); + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + return new NamedXContentRegistry(new SearchModule(Settings.EMPTY, false, Collections.emptyList()).getNamedXContents()); + } public void testIllegalArguments() { CountRequest countRequest = new CountRequest(); @@ -55,6 +93,8 @@ public class CountRequestTests extends ESTestCase { e = expectThrows(NullPointerException.class, () -> countRequest.source(null)); assertEquals("source must not be null", e.getMessage()); + e = expectThrows(NullPointerException.class, () -> countRequest.query(null)); + assertEquals("query must not be null", e.getMessage()); } public void testEqualsAndHashcode() { @@ -63,7 +103,11 @@ public class CountRequestTests extends ESTestCase { private CountRequest createCountRequest() { CountRequest countRequest = new CountRequest("index"); - countRequest.source(new SearchSourceBuilder().query(new MatchQueryBuilder("num", 10))); + if (randomBoolean()) { + countRequest.source(new SearchSourceBuilder().query(new MatchQueryBuilder("num", 10))); + } else { + countRequest.query(new MatchQueryBuilder("num", 10)); + } return countRequest; } @@ -76,6 +120,10 @@ public class CountRequestTests extends ESTestCase { mutators.add(() -> mutation.types(ArrayUtils.concat(countRequest.types(), new String[]{randomAlphaOfLength(10)}))); mutators.add(() -> mutation.preference(randomValueOtherThan(countRequest.preference(), () -> 
randomAlphaOfLengthBetween(3, 10)))); mutators.add(() -> mutation.routing(randomValueOtherThan(countRequest.routing(), () -> randomAlphaOfLengthBetween(3, 10)))); + mutators.add(() -> mutation.terminateAfter(randomValueOtherThan(countRequest.terminateAfter(), () -> randomIntBetween(0, 10)))); + mutators.add(() -> mutation.minScore(randomValueOtherThan(countRequest.minScore(), () -> (float) randomIntBetween(0, 10)))); + mutators.add(() -> mutation.query(randomValueOtherThan(countRequest.query(), + () -> new MatchQueryBuilder(randomAlphaOfLength(4), randomAlphaOfLength(4))))); randomFrom(mutators).run(); return mutation; } @@ -87,9 +135,11 @@ public class CountRequestTests extends ESTestCase { result.types(countRequest.types()); result.routing(countRequest.routing()); result.preference(countRequest.preference()); - if (countRequest.source() != null) { - result.source(countRequest.source()); + if (countRequest.query() != null) { + result.query(countRequest.query()); } + result.terminateAfter(countRequest.terminateAfter()); + result.minScore(countRequest.minScore()); return result; } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/DataFrameTransformDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/DataFrameTransformDocumentationIT.java index 8dedfd83af7..723594bf498 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/DataFrameTransformDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/DataFrameTransformDocumentationIT.java @@ -26,36 +26,36 @@ import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.RestHighLevelClient; import org.elasticsearch.client.core.AcknowledgedResponse; import org.elasticsearch.client.core.PageParams; -import org.elasticsearch.client.dataframe.DeleteDataFrameTransformRequest; -import org.elasticsearch.client.dataframe.GetDataFrameTransformRequest; -import org.elasticsearch.client.dataframe.GetDataFrameTransformResponse; -import org.elasticsearch.client.dataframe.GetDataFrameTransformStatsRequest; -import org.elasticsearch.client.dataframe.GetDataFrameTransformStatsResponse; -import org.elasticsearch.client.dataframe.PreviewDataFrameTransformRequest; -import org.elasticsearch.client.dataframe.PreviewDataFrameTransformResponse; -import org.elasticsearch.client.dataframe.PutDataFrameTransformRequest; -import org.elasticsearch.client.dataframe.StartDataFrameTransformRequest; -import org.elasticsearch.client.dataframe.StartDataFrameTransformResponse; -import org.elasticsearch.client.dataframe.StopDataFrameTransformRequest; -import org.elasticsearch.client.dataframe.StopDataFrameTransformResponse; -import org.elasticsearch.client.dataframe.UpdateDataFrameTransformRequest; -import org.elasticsearch.client.dataframe.UpdateDataFrameTransformResponse; -import org.elasticsearch.client.dataframe.transforms.DataFrameIndexerTransformStats; -import org.elasticsearch.client.dataframe.transforms.DataFrameTransformConfig; -import org.elasticsearch.client.dataframe.transforms.DataFrameTransformConfigUpdate; -import org.elasticsearch.client.dataframe.transforms.DataFrameTransformProgress; -import org.elasticsearch.client.dataframe.transforms.DataFrameTransformStats; -import org.elasticsearch.client.dataframe.transforms.DestConfig; -import org.elasticsearch.client.dataframe.transforms.NodeAttributes; -import org.elasticsearch.client.dataframe.transforms.QueryConfig; -import 
org.elasticsearch.client.dataframe.transforms.SourceConfig; -import org.elasticsearch.client.dataframe.transforms.TimeSyncConfig; -import org.elasticsearch.client.dataframe.transforms.pivot.AggregationConfig; -import org.elasticsearch.client.dataframe.transforms.pivot.GroupConfig; -import org.elasticsearch.client.dataframe.transforms.pivot.PivotConfig; -import org.elasticsearch.client.dataframe.transforms.pivot.TermsGroupSource; import org.elasticsearch.client.indices.CreateIndexRequest; import org.elasticsearch.client.indices.CreateIndexResponse; +import org.elasticsearch.client.transform.DeleteDataFrameTransformRequest; +import org.elasticsearch.client.transform.GetDataFrameTransformRequest; +import org.elasticsearch.client.transform.GetDataFrameTransformResponse; +import org.elasticsearch.client.transform.GetDataFrameTransformStatsRequest; +import org.elasticsearch.client.transform.GetDataFrameTransformStatsResponse; +import org.elasticsearch.client.transform.PreviewDataFrameTransformRequest; +import org.elasticsearch.client.transform.PreviewDataFrameTransformResponse; +import org.elasticsearch.client.transform.PutDataFrameTransformRequest; +import org.elasticsearch.client.transform.StartDataFrameTransformRequest; +import org.elasticsearch.client.transform.StartDataFrameTransformResponse; +import org.elasticsearch.client.transform.StopDataFrameTransformRequest; +import org.elasticsearch.client.transform.StopDataFrameTransformResponse; +import org.elasticsearch.client.transform.UpdateDataFrameTransformRequest; +import org.elasticsearch.client.transform.UpdateDataFrameTransformResponse; +import org.elasticsearch.client.transform.transforms.DataFrameIndexerTransformStats; +import org.elasticsearch.client.transform.transforms.DataFrameTransformConfig; +import org.elasticsearch.client.transform.transforms.DataFrameTransformConfigUpdate; +import org.elasticsearch.client.transform.transforms.DataFrameTransformProgress; +import org.elasticsearch.client.transform.transforms.DataFrameTransformStats; +import org.elasticsearch.client.transform.transforms.DestConfig; +import org.elasticsearch.client.transform.transforms.NodeAttributes; +import org.elasticsearch.client.transform.transforms.QueryConfig; +import org.elasticsearch.client.transform.transforms.SourceConfig; +import org.elasticsearch.client.transform.transforms.TimeSyncConfig; +import org.elasticsearch.client.transform.transforms.pivot.AggregationConfig; +import org.elasticsearch.client.transform.transforms.pivot.GroupConfig; +import org.elasticsearch.client.transform.transforms.pivot.PivotConfig; +import org.elasticsearch.client.transform.transforms.pivot.TermsGroupSource; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.query.MatchAllQueryBuilder; @@ -121,39 +121,39 @@ public class DataFrameTransformDocumentationIT extends ESRestHighLevelClientTest RestHighLevelClient client = highLevelClient(); - // tag::put-data-frame-transform-query-config + // tag::put-transform-query-config QueryConfig queryConfig = new QueryConfig(new MatchAllQueryBuilder()); - // end::put-data-frame-transform-query-config - // tag::put-data-frame-transform-source-config + // end::put-transform-query-config + // tag::put-transform-source-config SourceConfig sourceConfig = SourceConfig.builder() .setIndex("source-index") .setQueryConfig(queryConfig).build(); - // end::put-data-frame-transform-source-config - // tag::put-data-frame-transform-dest-config + // 
end::put-transform-source-config + // tag::put-transform-dest-config DestConfig destConfig = DestConfig.builder() .setIndex("pivot-destination") .setPipeline("my-pipeline").build(); - // end::put-data-frame-transform-dest-config - // tag::put-data-frame-transform-group-config + // end::put-transform-dest-config + // tag::put-transform-group-config GroupConfig groupConfig = GroupConfig.builder() .groupBy("reviewer", // <1> TermsGroupSource.builder().setField("user_id").build()) // <2> .build(); - // end::put-data-frame-transform-group-config - // tag::put-data-frame-transform-agg-config + // end::put-transform-group-config + // tag::put-transform-agg-config AggregatorFactories.Builder aggBuilder = new AggregatorFactories.Builder(); aggBuilder.addAggregator( AggregationBuilders.avg("avg_rating").field("stars")); // <1> AggregationConfig aggConfig = new AggregationConfig(aggBuilder); - // end::put-data-frame-transform-agg-config - // tag::put-data-frame-transform-pivot-config + // end::put-transform-agg-config + // tag::put-transform-pivot-config PivotConfig pivotConfig = PivotConfig.builder() .setGroups(groupConfig) // <1> .setAggregationConfig(aggConfig) // <2> .setMaxPageSearchSize(1000) // <3> .build(); - // end::put-data-frame-transform-pivot-config - // tag::put-data-frame-transform-config + // end::put-transform-pivot-config + // tag::put-transform-config DataFrameTransformConfig transformConfig = DataFrameTransformConfig .builder() .setId("reviewer-avg-rating") // <1> @@ -163,20 +163,20 @@ public class DataFrameTransformDocumentationIT extends ESRestHighLevelClientTest .setPivotConfig(pivotConfig) // <5> .setDescription("This is my test transform") // <6> .build(); - // end::put-data-frame-transform-config + // end::put-transform-config { - // tag::put-data-frame-transform-request + // tag::put-transform-request PutDataFrameTransformRequest request = new PutDataFrameTransformRequest(transformConfig); // <1> request.setDeferValidation(false); // <2> - // end::put-data-frame-transform-request + // end::put-transform-request - // tag::put-data-frame-transform-execute + // tag::put-transform-execute AcknowledgedResponse response = client.dataFrame().putDataFrameTransform( request, RequestOptions.DEFAULT); - // end::put-data-frame-transform-execute + // end::put-transform-execute transformsToClean.add(request.getConfig().getId()); assertTrue(response.isAcknowledged()); @@ -190,7 +190,7 @@ public class DataFrameTransformDocumentationIT extends ESRestHighLevelClientTest .build(); PutDataFrameTransformRequest request = new PutDataFrameTransformRequest(configWithDifferentId); - // tag::put-data-frame-transform-execute-listener + // tag::put-transform-execute-listener ActionListener listener = new ActionListener() { @Override @@ -203,16 +203,16 @@ public class DataFrameTransformDocumentationIT extends ESRestHighLevelClientTest // <2> } }; - // end::put-data-frame-transform-execute-listener + // end::put-transform-execute-listener // Replace the empty listener by a blocking listener in test final CountDownLatch latch = new CountDownLatch(1); listener = new LatchedActionListener<>(listener, latch); - // tag::put-data-frame-transform-execute-async + // tag::put-transform-execute-async client.dataFrame().putDataFrameTransformAsync( request, RequestOptions.DEFAULT, listener); // <1> - // end::put-data-frame-transform-execute-async + // end::put-transform-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); transformsToClean.add(request.getConfig().getId()); @@ -242,7 +242,7 @@ public class 
DataFrameTransformDocumentationIT extends ESRestHighLevelClientTest client.dataFrame().putDataFrameTransform(new PutDataFrameTransformRequest(transformConfig), RequestOptions.DEFAULT); transformsToClean.add(transformConfig.getId()); - // tag::update-data-frame-transform-config + // tag::update-transform-config DataFrameTransformConfigUpdate update = DataFrameTransformConfigUpdate .builder() .setSource(SourceConfig.builder() @@ -256,24 +256,24 @@ public class DataFrameTransformDocumentationIT extends ESRestHighLevelClientTest TimeValue.timeValueSeconds(120))) // <4> .setDescription("This is my updated transform") // <5> .build(); - // end::update-data-frame-transform-config + // end::update-transform-config { - // tag::update-data-frame-transform-request + // tag::update-transform-request UpdateDataFrameTransformRequest request = new UpdateDataFrameTransformRequest( update, // <1> "my-transform-to-update"); // <2> request.setDeferValidation(false); // <3> - // end::update-data-frame-transform-request + // end::update-transform-request - // tag::update-data-frame-transform-execute + // tag::update-transform-execute UpdateDataFrameTransformResponse response = client.dataFrame().updateDataFrameTransform(request, RequestOptions.DEFAULT); DataFrameTransformConfig updatedConfig = response.getTransformConfiguration(); - // end::update-data-frame-transform-execute + // end::update-transform-execute assertThat(updatedConfig.getDescription(), equalTo("This is my updated transform")); } @@ -281,7 +281,7 @@ public class DataFrameTransformDocumentationIT extends ESRestHighLevelClientTest UpdateDataFrameTransformRequest request = new UpdateDataFrameTransformRequest(update, "my-transform-to-update"); - // tag::update-data-frame-transform-execute-listener + // tag::update-transform-execute-listener ActionListener listener = new ActionListener() { @Override @@ -294,16 +294,16 @@ public class DataFrameTransformDocumentationIT extends ESRestHighLevelClientTest // <2> } }; - // end::update-data-frame-transform-execute-listener + // end::update-transform-execute-listener // Replace the empty listener by a blocking listener in test final CountDownLatch latch = new CountDownLatch(1); listener = new LatchedActionListener<>(listener, latch); - // tag::update-data-frame-transform-execute-async + // tag::update-transform-execute-async client.dataFrame().updateDataFrameTransformAsync( request, RequestOptions.DEFAULT, listener); // <1> - // end::update-data-frame-transform-execute-async + // end::update-transform-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); } @@ -333,45 +333,45 @@ public class DataFrameTransformDocumentationIT extends ESRestHighLevelClientTest transformsToClean.add(transformConfig.getId()); { - // tag::start-data-frame-transform-request + // tag::start-transform-request StartDataFrameTransformRequest request = new StartDataFrameTransformRequest("mega-transform"); // <1> - // end::start-data-frame-transform-request + // end::start-transform-request - // tag::start-data-frame-transform-request-options + // tag::start-transform-request-options request.setTimeout(TimeValue.timeValueSeconds(20)); // <1> - // end::start-data-frame-transform-request-options + // end::start-transform-request-options - // tag::start-data-frame-transform-execute + // tag::start-transform-execute StartDataFrameTransformResponse response = client.dataFrame().startDataFrameTransform( request, RequestOptions.DEFAULT); - // end::start-data-frame-transform-execute + // end::start-transform-execute 
assertTrue(response.isAcknowledged()); } { - // tag::stop-data-frame-transform-request + // tag::stop-transform-request StopDataFrameTransformRequest request = new StopDataFrameTransformRequest("mega-transform"); // <1> - // end::stop-data-frame-transform-request + // end::stop-transform-request - // tag::stop-data-frame-transform-request-options + // tag::stop-transform-request-options request.setWaitForCompletion(Boolean.TRUE); // <1> request.setTimeout(TimeValue.timeValueSeconds(30)); // <2> request.setAllowNoMatch(true); // <3> - // end::stop-data-frame-transform-request-options + // end::stop-transform-request-options - // tag::stop-data-frame-transform-execute + // tag::stop-transform-execute StopDataFrameTransformResponse response = client.dataFrame().stopDataFrameTransform( request, RequestOptions.DEFAULT); - // end::stop-data-frame-transform-execute + // end::stop-transform-execute assertTrue(response.isAcknowledged()); } { - // tag::start-data-frame-transform-execute-listener + // tag::start-transform-execute-listener ActionListener listener = new ActionListener() { @Override @@ -385,22 +385,22 @@ public class DataFrameTransformDocumentationIT extends ESRestHighLevelClientTest // <2> } }; - // end::start-data-frame-transform-execute-listener + // end::start-transform-execute-listener // Replace the empty listener by a blocking listener in test final CountDownLatch latch = new CountDownLatch(1); listener = new LatchedActionListener<>(listener, latch); StartDataFrameTransformRequest request = new StartDataFrameTransformRequest("mega-transform"); - // tag::start-data-frame-transform-execute-async + // tag::start-transform-execute-async client.dataFrame().startDataFrameTransformAsync( request, RequestOptions.DEFAULT, listener); // <1> - // end::start-data-frame-transform-execute-async + // end::start-transform-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); } { - // tag::stop-data-frame-transform-execute-listener + // tag::stop-transform-execute-listener ActionListener listener = new ActionListener() { @Override @@ -414,17 +414,17 @@ public class DataFrameTransformDocumentationIT extends ESRestHighLevelClientTest // <2> } }; - // end::stop-data-frame-transform-execute-listener + // end::stop-transform-execute-listener // Replace the empty listener by a blocking listener in test final CountDownLatch latch = new CountDownLatch(1); listener = new LatchedActionListener<>(listener, latch); StopDataFrameTransformRequest request = new StopDataFrameTransformRequest("mega-transform"); - // tag::stop-data-frame-transform-execute-async + // tag::stop-transform-execute-async client.dataFrame().stopDataFrameTransformAsync( request, RequestOptions.DEFAULT, listener); // <1> - // end::stop-data-frame-transform-execute-async + // end::stop-transform-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); } @@ -465,22 +465,22 @@ public class DataFrameTransformDocumentationIT extends ESRestHighLevelClientTest client.dataFrame().putDataFrameTransform(new PutDataFrameTransformRequest(transformConfig2), RequestOptions.DEFAULT); { - // tag::delete-data-frame-transform-request + // tag::delete-transform-request DeleteDataFrameTransformRequest request = new DeleteDataFrameTransformRequest("mega-transform"); // <1> request.setForce(false); // <2> - // end::delete-data-frame-transform-request + // end::delete-transform-request - // tag::delete-data-frame-transform-execute + // tag::delete-transform-execute AcknowledgedResponse response = client.dataFrame() 
.deleteDataFrameTransform(request, RequestOptions.DEFAULT); - // end::delete-data-frame-transform-execute + // end::delete-transform-execute assertTrue(response.isAcknowledged()); } { - // tag::delete-data-frame-transform-execute-listener + // tag::delete-transform-execute-listener ActionListener listener = new ActionListener() { @Override @@ -493,7 +493,7 @@ public class DataFrameTransformDocumentationIT extends ESRestHighLevelClientTest // <2> } }; - // end::delete-data-frame-transform-execute-listener + // end::delete-transform-execute-listener // Replace the empty listener by a blocking listener in test final CountDownLatch latch = new CountDownLatch(1); @@ -501,10 +501,10 @@ public class DataFrameTransformDocumentationIT extends ESRestHighLevelClientTest DeleteDataFrameTransformRequest request = new DeleteDataFrameTransformRequest("mega-transform2"); - // tag::delete-data-frame-transform-execute-async + // tag::delete-transform-execute-async client.dataFrame().deleteDataFrameTransformAsync( request, RequestOptions.DEFAULT, listener); // <1> - // end::delete-data-frame-transform-execute-async + // end::delete-transform-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); } @@ -523,7 +523,7 @@ public class DataFrameTransformDocumentationIT extends ESRestHighLevelClientTest AggregationConfig aggConfig = new AggregationConfig(aggBuilder); PivotConfig pivotConfig = PivotConfig.builder().setGroups(groupConfig).setAggregationConfig(aggConfig).build(); - // tag::preview-data-frame-transform-request + // tag::preview-transform-request DataFrameTransformConfig transformConfig = DataFrameTransformConfig.forPreview( SourceConfig.builder() @@ -534,20 +534,20 @@ public class DataFrameTransformDocumentationIT extends ESRestHighLevelClientTest PreviewDataFrameTransformRequest request = new PreviewDataFrameTransformRequest(transformConfig); // <3> - // end::preview-data-frame-transform-request + // end::preview-transform-request { - // tag::preview-data-frame-transform-execute + // tag::preview-transform-execute PreviewDataFrameTransformResponse response = client.dataFrame() .previewDataFrameTransform(request, RequestOptions.DEFAULT); - // end::preview-data-frame-transform-execute + // end::preview-transform-execute assertNotNull(response.getDocs()); assertNotNull(response.getMappings()); } { - // tag::preview-data-frame-transform-execute-listener + // tag::preview-transform-execute-listener ActionListener listener = new ActionListener() { @Override @@ -560,16 +560,16 @@ public class DataFrameTransformDocumentationIT extends ESRestHighLevelClientTest // <2> } }; - // end::preview-data-frame-transform-execute-listener + // end::preview-transform-execute-listener // Replace the empty listener by a blocking listener in test final CountDownLatch latch = new CountDownLatch(1); listener = new LatchedActionListener<>(listener, latch); - // tag::preview-data-frame-transform-execute-async + // tag::preview-transform-execute-async client.dataFrame().previewDataFrameTransformAsync( request, RequestOptions.DEFAULT, listener); // <1> - // end::preview-data-frame-transform-execute-async + // end::preview-transform-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); } @@ -600,26 +600,26 @@ public class DataFrameTransformDocumentationIT extends ESRestHighLevelClientTest client.dataFrame().putDataFrameTransform(new PutDataFrameTransformRequest(transformConfig), RequestOptions.DEFAULT); transformsToClean.add(id); - // tag::get-data-frame-transform-stats-request + // tag::get-transform-stats-request 
GetDataFrameTransformStatsRequest request = new GetDataFrameTransformStatsRequest(id); // <1> - // end::get-data-frame-transform-stats-request + // end::get-transform-stats-request - // tag::get-data-frame-transform-stats-request-options + // tag::get-transform-stats-request-options request.setPageParams(new PageParams(0, 100)); // <1> request.setAllowNoMatch(true); // <2> - // end::get-data-frame-transform-stats-request-options + // end::get-transform-stats-request-options { - // tag::get-data-frame-transform-stats-execute + // tag::get-transform-stats-execute GetDataFrameTransformStatsResponse response = client.dataFrame() .getDataFrameTransformStats(request, RequestOptions.DEFAULT); - // end::get-data-frame-transform-stats-execute + // end::get-transform-stats-execute assertThat(response.getTransformsStats(), hasSize(1)); - // tag::get-data-frame-transform-stats-response + // tag::get-transform-stats-response DataFrameTransformStats stats = response.getTransformsStats().get(0); // <1> DataFrameTransformStats.State state = @@ -631,14 +631,14 @@ public class DataFrameTransformDocumentationIT extends ESRestHighLevelClientTest .getNext().getCheckpointProgress(); // <4> NodeAttributes node = stats.getNode(); // <5> - // end::get-data-frame-transform-stats-response + // end::get-transform-stats-response assertEquals(DataFrameTransformStats.State.STOPPED, state); assertNotNull(indexerStats); assertNull(progress); } { - // tag::get-data-frame-transform-stats-execute-listener + // tag::get-transform-stats-execute-listener ActionListener listener = new ActionListener() { @Override @@ -652,16 +652,16 @@ public class DataFrameTransformDocumentationIT extends ESRestHighLevelClientTest // <2> } }; - // end::get-data-frame-transform-stats-execute-listener + // end::get-transform-stats-execute-listener // Replace the empty listener by a blocking listener in test final CountDownLatch latch = new CountDownLatch(1); listener = new LatchedActionListener<>(listener, latch); - // tag::get-data-frame-transform-stats-execute-async + // tag::get-transform-stats-execute-async client.dataFrame().getDataFrameTransformStatsAsync( request, RequestOptions.DEFAULT, listener); // <1> - // end::get-data-frame-transform-stats-execute-async + // end::get-transform-stats-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); } @@ -694,31 +694,31 @@ public class DataFrameTransformDocumentationIT extends ESRestHighLevelClientTest transformsToClean.add(putTransformConfig.getId()); { - // tag::get-data-frame-transform-request + // tag::get-transform-request GetDataFrameTransformRequest request = new GetDataFrameTransformRequest("mega-transform"); // <1> - // end::get-data-frame-transform-request + // end::get-transform-request - // tag::get-data-frame-transform-request-options + // tag::get-transform-request-options request.setPageParams(new PageParams(0, 100)); // <1> request.setAllowNoMatch(true); // <2> - // end::get-data-frame-transform-request-options + // end::get-transform-request-options - // tag::get-data-frame-transform-execute + // tag::get-transform-execute GetDataFrameTransformResponse response = client.dataFrame() .getDataFrameTransform(request, RequestOptions.DEFAULT); - // end::get-data-frame-transform-execute + // end::get-transform-execute - // tag::get-data-frame-transform-response + // tag::get-transform-response List transformConfigs = response.getTransformConfigurations(); - // end::get-data-frame-transform-response + // end::get-transform-response assertEquals(1, transformConfigs.size()); } { - // 
tag::get-data-frame-transform-execute-listener + // tag::get-transform-execute-listener ActionListener listener = new ActionListener() { @Override @@ -731,7 +731,7 @@ public class DataFrameTransformDocumentationIT extends ESRestHighLevelClientTest // <2> } }; - // end::get-data-frame-transform-execute-listener + // end::get-transform-execute-listener // Replace the empty listener by a blocking listener in test final CountDownLatch latch = new CountDownLatch(1); @@ -739,10 +739,10 @@ public class DataFrameTransformDocumentationIT extends ESRestHighLevelClientTest GetDataFrameTransformRequest request = new GetDataFrameTransformRequest("mega-transform"); - // tag::get-data-frame-transform-execute-async + // tag::get-transform-execute-async client.dataFrame().getDataFrameTransformAsync( request, RequestOptions.DEFAULT, listener); // <1> - // end::get-data-frame-transform-execute-async + // end::get-transform-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ILMDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ILMDocumentationIT.java index 2d876905dfb..7805073f06f 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ILMDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ILMDocumentationIT.java @@ -59,10 +59,14 @@ import org.elasticsearch.client.slm.ExecuteSnapshotLifecyclePolicyRequest; import org.elasticsearch.client.slm.ExecuteSnapshotLifecyclePolicyResponse; import org.elasticsearch.client.slm.GetSnapshotLifecyclePolicyRequest; import org.elasticsearch.client.slm.GetSnapshotLifecyclePolicyResponse; +import org.elasticsearch.client.slm.GetSnapshotLifecycleStatsRequest; +import org.elasticsearch.client.slm.GetSnapshotLifecycleStatsResponse; import org.elasticsearch.client.slm.PutSnapshotLifecyclePolicyRequest; import org.elasticsearch.client.slm.SnapshotInvocationRecord; import org.elasticsearch.client.slm.SnapshotLifecyclePolicy; import org.elasticsearch.client.slm.SnapshotLifecyclePolicyMetadata; +import org.elasticsearch.client.slm.SnapshotLifecycleStats; +import org.elasticsearch.client.slm.SnapshotRetentionConfiguration; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.ImmutableOpenMap; @@ -88,6 +92,7 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; public class ILMDocumentationIT extends ESRestHighLevelClientTestCase { @@ -773,8 +778,11 @@ public class ILMDocumentationIT extends ESRestHighLevelClientTestCase { // tag::slm-put-snapshot-lifecycle-policy Map config = new HashMap<>(); config.put("indices", Collections.singletonList("idx")); + SnapshotRetentionConfiguration retention = + new SnapshotRetentionConfiguration(TimeValue.timeValueDays(30), 2, 10); SnapshotLifecyclePolicy policy = new SnapshotLifecyclePolicy( - "policy_id", "name", "1 2 3 * * ?", "my_repository", config); + "policy_id", "name", "1 2 3 * * ?", + "my_repository", config, retention); PutSnapshotLifecyclePolicyRequest request = new PutSnapshotLifecyclePolicyRequest(policy); // end::slm-put-snapshot-lifecycle-policy @@ -866,7 +874,6 @@ public class ILMDocumentationIT extends ESRestHighLevelClientTestCase { assertNotNull(policyMeta); assertThat(retrievedPolicy, 
equalTo(policy)); - assertThat(policyVersion, equalTo(1L)); createIndex("idx", Settings.builder().put("index.number_of_shards", 1).build()); @@ -933,6 +940,22 @@ public class ILMDocumentationIT extends ESRestHighLevelClientTestCase { // end::slm-execute-snapshot-lifecycle-policy-execute-async latch.await(5, TimeUnit.SECONDS); + // tag::slm-get-snapshot-lifecycle-stats + GetSnapshotLifecycleStatsRequest getStatsRequest = + new GetSnapshotLifecycleStatsRequest(); + // end::slm-get-snapshot-lifecycle-stats + + // tag::slm-get-snapshot-lifecycle-stats-execute + GetSnapshotLifecycleStatsResponse statsResp = client.indexLifecycle() + .getSnapshotLifecycleStats(getStatsRequest, RequestOptions.DEFAULT); + SnapshotLifecycleStats stats = statsResp.getStats(); + SnapshotLifecycleStats.SnapshotPolicyStats policyStats = + stats.getMetrics().get("policy_id"); + // end::slm-get-snapshot-lifecycle-stats-execute + assertThat( + statsResp.getStats().getMetrics().get("policy_id").getSnapshotsTaken(), + greaterThanOrEqualTo(1L)); + //////// DELETE // tag::slm-delete-snapshot-lifecycle-policy DeleteSnapshotLifecyclePolicyRequest deleteRequest = diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/AcknowledgedTasksResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/AcknowledgedTasksResponseTests.java similarity index 99% rename from client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/AcknowledgedTasksResponseTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/transform/AcknowledgedTasksResponseTests.java index df7fcc14b95..b12cea0b897 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/AcknowledgedTasksResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/AcknowledgedTasksResponseTests.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.client.dataframe; +package org.elasticsearch.client.transform; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.TaskOperationFailure; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/DeleteDataFrameTransformRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/DeleteDataFrameTransformRequestTests.java similarity index 96% rename from client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/DeleteDataFrameTransformRequestTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/transform/DeleteDataFrameTransformRequestTests.java index 390d90361c5..dd20d513970 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/DeleteDataFrameTransformRequestTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/DeleteDataFrameTransformRequestTests.java @@ -17,7 +17,7 @@ * under the License. 
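The ILM documentation test changes above add snapshot retention to the SLM policy constructor and a call to the new snapshot lifecycle stats API. Condensed into one sketch (config is the indices map built earlier in that test; the values are the ones used there):

    SnapshotRetentionConfiguration retention =
        new SnapshotRetentionConfiguration(TimeValue.timeValueDays(30), 2, 10);
    SnapshotLifecyclePolicy policy = new SnapshotLifecyclePolicy(
        "policy_id", "name", "1 2 3 * * ?", "my_repository", config, retention);

    GetSnapshotLifecycleStatsResponse statsResp = client.indexLifecycle()
        .getSnapshotLifecycleStats(new GetSnapshotLifecycleStatsRequest(), RequestOptions.DEFAULT);
    long snapshotsTaken = statsResp.getStats().getMetrics().get("policy_id").getSnapshotsTaken();
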
*/ -package org.elasticsearch.client.dataframe; +package org.elasticsearch.client.transform; import org.elasticsearch.test.ESTestCase; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/GetDataFrameTransformRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/GetDataFrameTransformRequestTests.java similarity index 96% rename from client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/GetDataFrameTransformRequestTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/transform/GetDataFrameTransformRequestTests.java index 818eea4520a..044d880f8e8 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/GetDataFrameTransformRequestTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/GetDataFrameTransformRequestTests.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.client.dataframe; +package org.elasticsearch.client.transform; import org.elasticsearch.test.ESTestCase; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/GetDataFrameTransformResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/GetDataFrameTransformResponseTests.java similarity index 95% rename from client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/GetDataFrameTransformResponseTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/transform/GetDataFrameTransformResponseTests.java index 2bedb7d095f..90c2c286077 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/GetDataFrameTransformResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/GetDataFrameTransformResponseTests.java @@ -17,10 +17,10 @@ * under the License. */ -package org.elasticsearch.client.dataframe; +package org.elasticsearch.client.transform; -import org.elasticsearch.client.dataframe.transforms.DataFrameTransformConfig; -import org.elasticsearch.client.dataframe.transforms.DataFrameTransformConfigTests; +import org.elasticsearch.client.transform.transforms.DataFrameTransformConfig; +import org.elasticsearch.client.transform.transforms.DataFrameTransformConfigTests; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentBuilder; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/GetDataFrameTransformStatsRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/GetDataFrameTransformStatsRequestTests.java similarity index 96% rename from client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/GetDataFrameTransformStatsRequestTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/transform/GetDataFrameTransformStatsRequestTests.java index 300a8180168..68fd9ff8853 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/GetDataFrameTransformStatsRequestTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/GetDataFrameTransformStatsRequestTests.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.client.dataframe; +package org.elasticsearch.client.transform; import org.elasticsearch.test.ESTestCase; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/GetDataFrameTransformStatsResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/GetDataFrameTransformStatsResponseTests.java similarity index 96% rename from client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/GetDataFrameTransformStatsResponseTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/transform/GetDataFrameTransformStatsResponseTests.java index 3a10a162ea9..d903215284a 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/GetDataFrameTransformStatsResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/GetDataFrameTransformStatsResponseTests.java @@ -17,12 +17,12 @@ * under the License. */ -package org.elasticsearch.client.dataframe; +package org.elasticsearch.client.transform; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.TaskOperationFailure; -import org.elasticsearch.client.dataframe.transforms.DataFrameTransformStats; -import org.elasticsearch.client.dataframe.transforms.DataFrameTransformStatsTests; +import org.elasticsearch.client.transform.transforms.DataFrameTransformStats; +import org.elasticsearch.client.transform.transforms.DataFrameTransformStatsTests; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.test.ESTestCase; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/PreviewDataFrameTransformRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/PreviewDataFrameTransformRequestTests.java similarity index 92% rename from client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/PreviewDataFrameTransformRequestTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/transform/PreviewDataFrameTransformRequestTests.java index 45d5d879d47..d4e98685832 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/PreviewDataFrameTransformRequestTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/PreviewDataFrameTransformRequestTests.java @@ -17,12 +17,12 @@ * under the License. 
*/ -package org.elasticsearch.client.dataframe; +package org.elasticsearch.client.transform; import org.elasticsearch.client.ValidationException; -import org.elasticsearch.client.dataframe.transforms.DataFrameTransformConfig; -import org.elasticsearch.client.dataframe.transforms.DataFrameTransformConfigTests; -import org.elasticsearch.client.dataframe.transforms.pivot.PivotConfigTests; +import org.elasticsearch.client.transform.transforms.DataFrameTransformConfig; +import org.elasticsearch.client.transform.transforms.DataFrameTransformConfigTests; +import org.elasticsearch.client.transform.transforms.pivot.PivotConfigTests; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentParser; @@ -34,7 +34,7 @@ import java.util.Collections; import java.util.List; import java.util.Optional; -import static org.elasticsearch.client.dataframe.transforms.SourceConfigTests.randomSourceConfig; +import static org.elasticsearch.client.transform.transforms.SourceConfigTests.randomSourceConfig; import static org.hamcrest.Matchers.containsString; public class PreviewDataFrameTransformRequestTests extends AbstractXContentTestCase<PreviewDataFrameTransformRequest> { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/PreviewDataFrameTransformResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/PreviewDataFrameTransformResponseTests.java similarity index 98% rename from client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/PreviewDataFrameTransformResponseTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/transform/PreviewDataFrameTransformResponseTests.java index 28b7e52aac1..c8f852c68c9 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/PreviewDataFrameTransformResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/PreviewDataFrameTransformResponseTests.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.client.dataframe; +package org.elasticsearch.client.transform; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.test.ESTestCase; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/PutDataFrameTransformRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/PutDataFrameTransformRequestTests.java similarity index 93% rename from client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/PutDataFrameTransformRequestTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/transform/PutDataFrameTransformRequestTests.java index 7c7cd3fa151..19af4bfe5a0 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/PutDataFrameTransformRequestTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/PutDataFrameTransformRequestTests.java @@ -17,12 +17,12 @@ * under the License. 
*/ -package org.elasticsearch.client.dataframe; +package org.elasticsearch.client.transform; import org.elasticsearch.client.ValidationException; -import org.elasticsearch.client.dataframe.transforms.DataFrameTransformConfig; -import org.elasticsearch.client.dataframe.transforms.DataFrameTransformConfigTests; -import org.elasticsearch.client.dataframe.transforms.pivot.PivotConfigTests; +import org.elasticsearch.client.transform.transforms.DataFrameTransformConfig; +import org.elasticsearch.client.transform.transforms.DataFrameTransformConfigTests; +import org.elasticsearch.client.transform.transforms.pivot.PivotConfigTests; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentParser; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/StartDataFrameTransformRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/StartDataFrameTransformRequestTests.java similarity index 97% rename from client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/StartDataFrameTransformRequestTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/transform/StartDataFrameTransformRequestTests.java index 6db4d1cd36f..f4950fc057a 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/StartDataFrameTransformRequestTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/StartDataFrameTransformRequestTests.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.client.dataframe; +package org.elasticsearch.client.transform; import org.elasticsearch.client.ValidationException; import org.elasticsearch.test.ESTestCase; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/StopDataFrameTransformRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/StopDataFrameTransformRequestTests.java similarity index 97% rename from client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/StopDataFrameTransformRequestTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/transform/StopDataFrameTransformRequestTests.java index 56b22d57c07..5b28983c086 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/StopDataFrameTransformRequestTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/StopDataFrameTransformRequestTests.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.client.dataframe; +package org.elasticsearch.client.transform; import org.elasticsearch.client.ValidationException; import org.elasticsearch.test.ESTestCase; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/UpdateDataFrameTransformRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/UpdateDataFrameTransformRequestTests.java similarity index 94% rename from client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/UpdateDataFrameTransformRequestTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/transform/UpdateDataFrameTransformRequestTests.java index c9f43a44bd7..7944b486c9e 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/UpdateDataFrameTransformRequestTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/UpdateDataFrameTransformRequestTests.java @@ -17,10 +17,10 @@ * under the License. */ -package org.elasticsearch.client.dataframe; +package org.elasticsearch.client.transform; import org.elasticsearch.client.ValidationException; -import org.elasticsearch.client.dataframe.transforms.DataFrameTransformConfigUpdate; +import org.elasticsearch.client.transform.transforms.DataFrameTransformConfigUpdate; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentParser; @@ -32,7 +32,7 @@ import java.util.Collections; import java.util.List; import java.util.Optional; -import static org.elasticsearch.client.dataframe.transforms.DataFrameTransformConfigUpdateTests.randomDataFrameTransformConfigUpdate; +import static org.elasticsearch.client.transform.transforms.DataFrameTransformConfigUpdateTests.randomDataFrameTransformConfigUpdate; import static org.hamcrest.Matchers.containsString; public class UpdateDataFrameTransformRequestTests extends AbstractXContentTestCase<UpdateDataFrameTransformRequest> { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/UpdateDataFrameTransformResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/UpdateDataFrameTransformResponseTests.java similarity index 95% rename from client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/UpdateDataFrameTransformResponseTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/transform/UpdateDataFrameTransformResponseTests.java index efbb1ef4672..a2b20d4516d 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/UpdateDataFrameTransformResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/UpdateDataFrameTransformResponseTests.java @@ -17,9 +17,9 @@ * under the License. 
*/ -package org.elasticsearch.client.dataframe; +package org.elasticsearch.client.transform; -import org.elasticsearch.client.dataframe.transforms.DataFrameTransformConfigTests; +import org.elasticsearch.client.transform.transforms.DataFrameTransformConfigTests; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentBuilder; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameIndexerPositionTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/DataFrameIndexerPositionTests.java similarity index 98% rename from client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameIndexerPositionTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/DataFrameIndexerPositionTests.java index cd17dd3fe8e..e4d1c505532 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameIndexerPositionTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/DataFrameIndexerPositionTests.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.client.dataframe.transforms; +package org.elasticsearch.client.transform.transforms; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.test.ESTestCase; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameIndexerTransformStatsTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/DataFrameIndexerTransformStatsTests.java similarity index 98% rename from client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameIndexerTransformStatsTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/DataFrameIndexerTransformStatsTests.java index 75557caea2f..f6174815aa4 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameIndexerTransformStatsTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/DataFrameIndexerTransformStatsTests.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.client.dataframe.transforms; +package org.elasticsearch.client.transform.transforms; import org.elasticsearch.client.core.IndexerJobStats; import org.elasticsearch.common.xcontent.XContentBuilder; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformCheckpointStatsTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/DataFrameTransformCheckpointStatsTests.java similarity index 98% rename from client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformCheckpointStatsTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/DataFrameTransformCheckpointStatsTests.java index ec7e8b6422e..d03651170f7 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformCheckpointStatsTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/DataFrameTransformCheckpointStatsTests.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.client.dataframe.transforms; +package org.elasticsearch.client.transform.transforms; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.test.ESTestCase; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformCheckpointingInfoTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/DataFrameTransformCheckpointingInfoTests.java similarity index 98% rename from client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformCheckpointingInfoTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/DataFrameTransformCheckpointingInfoTests.java index 23ac6748898..2ec042a4a6a 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformCheckpointingInfoTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/DataFrameTransformCheckpointingInfoTests.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.client.dataframe.transforms; +package org.elasticsearch.client.transform.transforms; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.test.ESTestCase; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformConfigTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/DataFrameTransformConfigTests.java similarity index 91% rename from client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformConfigTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/DataFrameTransformConfigTests.java index 88191809e22..a70a580c620 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformConfigTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/DataFrameTransformConfigTests.java @@ -17,11 +17,11 @@ * under the License. 
*/ -package org.elasticsearch.client.dataframe.transforms; +package org.elasticsearch.client.transform.transforms; -import org.elasticsearch.client.dataframe.DataFrameNamedXContentProvider; import org.elasticsearch.Version; -import org.elasticsearch.client.dataframe.transforms.pivot.PivotConfigTests; +import org.elasticsearch.client.transform.DataFrameNamedXContentProvider; +import org.elasticsearch.client.transform.transforms.pivot.PivotConfigTests; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.NamedXContentRegistry; @@ -35,8 +35,8 @@ import java.util.Collections; import java.util.List; import java.util.function.Predicate; -import static org.elasticsearch.client.dataframe.transforms.DestConfigTests.randomDestConfig; -import static org.elasticsearch.client.dataframe.transforms.SourceConfigTests.randomSourceConfig; +import static org.elasticsearch.client.transform.transforms.DestConfigTests.randomDestConfig; +import static org.elasticsearch.client.transform.transforms.SourceConfigTests.randomSourceConfig; public class DataFrameTransformConfigTests extends AbstractXContentTestCase<DataFrameTransformConfig> { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformConfigUpdateTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/DataFrameTransformConfigUpdateTests.java similarity index 92% rename from client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformConfigUpdateTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/DataFrameTransformConfigUpdateTests.java index e4d69a84968..7d5cca0e60b 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformConfigUpdateTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/DataFrameTransformConfigUpdateTests.java @@ -17,9 +17,9 @@ * under the License. 
*/ -package org.elasticsearch.client.dataframe.transforms; +package org.elasticsearch.client.transform.transforms; -import org.elasticsearch.client.dataframe.DataFrameNamedXContentProvider; +import org.elasticsearch.client.transform.DataFrameNamedXContentProvider; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.NamedXContentRegistry; @@ -31,8 +31,8 @@ import java.io.IOException; import java.util.Collections; import java.util.List; -import static org.elasticsearch.client.dataframe.transforms.DestConfigTests.randomDestConfig; -import static org.elasticsearch.client.dataframe.transforms.SourceConfigTests.randomSourceConfig; +import static org.elasticsearch.client.transform.transforms.DestConfigTests.randomDestConfig; +import static org.elasticsearch.client.transform.transforms.SourceConfigTests.randomSourceConfig; public class DataFrameTransformConfigUpdateTests extends AbstractXContentTestCase<DataFrameTransformConfigUpdate> { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformProgressTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/DataFrameTransformProgressTests.java similarity index 98% rename from client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformProgressTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/DataFrameTransformProgressTests.java index 3792d4855db..faf6805ac27 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformProgressTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/DataFrameTransformProgressTests.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.client.dataframe.transforms; +package org.elasticsearch.client.transform.transforms; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.test.ESTestCase; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformStatsTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/DataFrameTransformStatsTests.java similarity index 98% rename from client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformStatsTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/DataFrameTransformStatsTests.java index ae252069c61..4b3658f6ea1 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformStatsTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/DataFrameTransformStatsTests.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.client.dataframe.transforms; +package org.elasticsearch.client.transform.transforms; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DestConfigTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/DestConfigTests.java similarity index 96% rename from client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DestConfigTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/DestConfigTests.java index 0dc8f99d763..ce2fe8538aa 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/DestConfigTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/DestConfigTests.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.client.dataframe.transforms; +package org.elasticsearch.client.transform.transforms; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractXContentTestCase; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/NodeAttributesTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/NodeAttributesTests.java similarity index 97% rename from client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/NodeAttributesTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/NodeAttributesTests.java index 661aa9f7a30..1ca1bf65f6c 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/NodeAttributesTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/NodeAttributesTests.java @@ -16,7 +16,7 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.client.dataframe.transforms; +package org.elasticsearch.client.transform.transforms; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractXContentTestCase; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/QueryConfigTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/QueryConfigTests.java similarity index 97% rename from client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/QueryConfigTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/QueryConfigTests.java index 644858cab28..e8d3a6a2ad1 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/QueryConfigTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/QueryConfigTests.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.client.dataframe.transforms; +package org.elasticsearch.client.transform.transforms; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.NamedXContentRegistry; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/SourceConfigTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/SourceConfigTests.java similarity index 97% rename from client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/SourceConfigTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/SourceConfigTests.java index 722c265c9a0..dede3d2f256 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/SourceConfigTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/SourceConfigTests.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.client.dataframe.transforms; +package org.elasticsearch.client.transform.transforms; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.NamedXContentRegistry; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/TimeSyncConfigTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/TimeSyncConfigTests.java similarity index 96% rename from client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/TimeSyncConfigTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/TimeSyncConfigTests.java index dd2a17eb026..10437a3154c 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/TimeSyncConfigTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/TimeSyncConfigTests.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.client.dataframe.transforms; +package org.elasticsearch.client.transform.transforms; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentParser; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameIndexerPositionTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/hlrc/DataFrameIndexerPositionTests.java similarity index 72% rename from client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameIndexerPositionTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/hlrc/DataFrameIndexerPositionTests.java index 620629f1760..a26b94482ae 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameIndexerPositionTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/hlrc/DataFrameIndexerPositionTests.java @@ -17,12 +17,12 @@ * under the License. 
*/ -package org.elasticsearch.client.dataframe.transforms.hlrc; +package org.elasticsearch.client.transform.transforms.hlrc; import org.elasticsearch.client.AbstractResponseTestCase; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerPosition; +import org.elasticsearch.xpack.core.transform.transforms.TransformIndexerPosition; import java.util.LinkedHashMap; import java.util.Map; @@ -30,34 +30,34 @@ import java.util.Map; import static org.hamcrest.Matchers.equalTo; public class DataFrameIndexerPositionTests extends AbstractResponseTestCase< - DataFrameIndexerPosition, - org.elasticsearch.client.dataframe.transforms.DataFrameIndexerPosition> { + TransformIndexerPosition, + org.elasticsearch.client.transform.transforms.DataFrameIndexerPosition> { - public static DataFrameIndexerPosition fromHlrc( - org.elasticsearch.client.dataframe.transforms.DataFrameIndexerPosition instance) { + public static TransformIndexerPosition fromHlrc( + org.elasticsearch.client.transform.transforms.DataFrameIndexerPosition instance) { if (instance == null) { return null; } - return new DataFrameIndexerPosition(instance.getIndexerPosition(), instance.getBucketsPosition()); + return new TransformIndexerPosition(instance.getIndexerPosition(), instance.getBucketsPosition()); } - public static DataFrameIndexerPosition randomDataFrameIndexerPosition() { - return new DataFrameIndexerPosition(randomPositionMap(), randomPositionMap()); + public static TransformIndexerPosition randomDataFrameIndexerPosition() { + return new TransformIndexerPosition(randomPositionMap(), randomPositionMap()); } @Override - protected DataFrameIndexerPosition createServerTestInstance(XContentType xContentType) { + protected TransformIndexerPosition createServerTestInstance(XContentType xContentType) { return randomDataFrameIndexerPosition(); } @Override - protected org.elasticsearch.client.dataframe.transforms.DataFrameIndexerPosition doParseToClientInstance(XContentParser parser) { - return org.elasticsearch.client.dataframe.transforms.DataFrameIndexerPosition.fromXContent(parser); + protected org.elasticsearch.client.transform.transforms.DataFrameIndexerPosition doParseToClientInstance(XContentParser parser) { + return org.elasticsearch.client.transform.transforms.DataFrameIndexerPosition.fromXContent(parser); } @Override - protected void assertInstances(DataFrameIndexerPosition serverTestInstance, - org.elasticsearch.client.dataframe.transforms.DataFrameIndexerPosition clientInstance) { + protected void assertInstances(TransformIndexerPosition serverTestInstance, + org.elasticsearch.client.transform.transforms.DataFrameIndexerPosition clientInstance) { assertThat(serverTestInstance.getIndexerPosition(), equalTo(clientInstance.getIndexerPosition())); assertThat(serverTestInstance.getBucketsPosition(), equalTo(clientInstance.getBucketsPosition())); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameIndexerTransformStatsTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/hlrc/DataFrameIndexerTransformStatsTests.java similarity index 70% rename from client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameIndexerTransformStatsTests.java rename to 
client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/hlrc/DataFrameIndexerTransformStatsTests.java index 9aa58105311..0c696c7368b 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameIndexerTransformStatsTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/hlrc/DataFrameIndexerTransformStatsTests.java @@ -17,21 +17,21 @@ * under the License. */ -package org.elasticsearch.client.dataframe.transforms.hlrc; +package org.elasticsearch.client.transform.transforms.hlrc; -import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.client.AbstractHlrcXContentTestCase; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerTransformStats; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.transform.transforms.TransformIndexerStats; import java.io.IOException; public class DataFrameIndexerTransformStatsTests extends AbstractHlrcXContentTestCase< - DataFrameIndexerTransformStats, - org.elasticsearch.client.dataframe.transforms.DataFrameIndexerTransformStats> { + TransformIndexerStats, + org.elasticsearch.client.transform.transforms.DataFrameIndexerTransformStats> { - public static DataFrameIndexerTransformStats fromHlrc( - org.elasticsearch.client.dataframe.transforms.DataFrameIndexerTransformStats instance) { - return new DataFrameIndexerTransformStats( + public static TransformIndexerStats fromHlrc( + org.elasticsearch.client.transform.transforms.DataFrameIndexerTransformStats instance) { + return new TransformIndexerStats( instance.getNumPages(), instance.getNumDocuments(), instance.getOutputDocuments(), @@ -48,19 +48,19 @@ public class DataFrameIndexerTransformStatsTests extends AbstractHlrcXContentTes } @Override - public org.elasticsearch.client.dataframe.transforms.DataFrameIndexerTransformStats doHlrcParseInstance(XContentParser parser) + public org.elasticsearch.client.transform.transforms.DataFrameIndexerTransformStats doHlrcParseInstance(XContentParser parser) throws IOException { - return org.elasticsearch.client.dataframe.transforms.DataFrameIndexerTransformStats.fromXContent(parser); + return org.elasticsearch.client.transform.transforms.DataFrameIndexerTransformStats.fromXContent(parser); } @Override - public DataFrameIndexerTransformStats convertHlrcToInternal( - org.elasticsearch.client.dataframe.transforms.DataFrameIndexerTransformStats instance) { + public TransformIndexerStats convertHlrcToInternal( + org.elasticsearch.client.transform.transforms.DataFrameIndexerTransformStats instance) { return fromHlrc(instance); } - public static DataFrameIndexerTransformStats randomStats() { - return new DataFrameIndexerTransformStats(randomLongBetween(10L, 10000L), + public static TransformIndexerStats randomStats() { + return new TransformIndexerStats(randomLongBetween(10L, 10000L), randomLongBetween(0L, 10000L), randomLongBetween(0L, 10000L), randomLongBetween(0L, 10000L), randomLongBetween(0L, 10000L), randomLongBetween(0L, 10000L), randomLongBetween(0L, 10000L), randomLongBetween(0L, 10000L), randomLongBetween(0L, 10000L), randomLongBetween(0L, 10000L), @@ -70,13 +70,13 @@ public class DataFrameIndexerTransformStatsTests extends AbstractHlrcXContentTes } @Override - protected DataFrameIndexerTransformStats createTestInstance() { + protected TransformIndexerStats createTestInstance() { return randomStats(); } @Override - protected DataFrameIndexerTransformStats 
doParseInstance(XContentParser parser) throws IOException { - return DataFrameIndexerTransformStats.fromXContent(parser); + protected TransformIndexerStats doParseInstance(XContentParser parser) throws IOException { + return TransformIndexerStats.fromXContent(parser); } @Override diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameTransformCheckpointStatsTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/hlrc/DataFrameTransformCheckpointStatsTests.java similarity index 75% rename from client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameTransformCheckpointStatsTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/hlrc/DataFrameTransformCheckpointStatsTests.java index 02bb3331bd5..6299431893c 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameTransformCheckpointStatsTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/hlrc/DataFrameTransformCheckpointStatsTests.java @@ -17,51 +17,51 @@ * under the License. */ -package org.elasticsearch.client.dataframe.transforms.hlrc; +package org.elasticsearch.client.transform.transforms.hlrc; import org.elasticsearch.client.AbstractResponseTestCase; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpointStats; +import org.elasticsearch.xpack.core.transform.transforms.TransformCheckpointStats; import java.io.IOException; import static org.hamcrest.Matchers.equalTo; public class DataFrameTransformCheckpointStatsTests extends AbstractResponseTestCase< - DataFrameTransformCheckpointStats, - org.elasticsearch.client.dataframe.transforms.DataFrameTransformCheckpointStats> { + TransformCheckpointStats, + org.elasticsearch.client.transform.transforms.DataFrameTransformCheckpointStats> { - public static DataFrameTransformCheckpointStats fromHlrc( - org.elasticsearch.client.dataframe.transforms.DataFrameTransformCheckpointStats instance) { - return new DataFrameTransformCheckpointStats(instance.getCheckpoint(), + public static TransformCheckpointStats fromHlrc( + org.elasticsearch.client.transform.transforms.DataFrameTransformCheckpointStats instance) { + return new TransformCheckpointStats(instance.getCheckpoint(), DataFrameIndexerPositionTests.fromHlrc(instance.getPosition()), DataFrameTransformProgressTests.fromHlrc(instance.getCheckpointProgress()), instance.getTimestampMillis(), instance.getTimeUpperBoundMillis()); } - public static DataFrameTransformCheckpointStats randomDataFrameTransformCheckpointStats() { - return new DataFrameTransformCheckpointStats(randomLongBetween(1, 1_000_000), + public static TransformCheckpointStats randomDataFrameTransformCheckpointStats() { + return new TransformCheckpointStats(randomLongBetween(1, 1_000_000), DataFrameIndexerPositionTests.randomDataFrameIndexerPosition(), randomBoolean() ? 
null : DataFrameTransformProgressTests.randomDataFrameTransformProgress(), randomLongBetween(1, 1_000_000), randomLongBetween(0, 1_000_000)); } @Override - protected DataFrameTransformCheckpointStats createServerTestInstance(XContentType xContentType) { + protected TransformCheckpointStats createServerTestInstance(XContentType xContentType) { return randomDataFrameTransformCheckpointStats(); } @Override - protected org.elasticsearch.client.dataframe.transforms.DataFrameTransformCheckpointStats doParseToClientInstance(XContentParser parser) + protected org.elasticsearch.client.transform.transforms.DataFrameTransformCheckpointStats doParseToClientInstance(XContentParser parser) throws IOException { - return org.elasticsearch.client.dataframe.transforms.DataFrameTransformCheckpointStats.fromXContent(parser); + return org.elasticsearch.client.transform.transforms.DataFrameTransformCheckpointStats.fromXContent(parser); } @Override - protected void assertInstances(DataFrameTransformCheckpointStats serverTestInstance, - org.elasticsearch.client.dataframe.transforms.DataFrameTransformCheckpointStats clientInstance) { + protected void assertInstances(TransformCheckpointStats serverTestInstance, + org.elasticsearch.client.transform.transforms.DataFrameTransformCheckpointStats clientInstance) { assertThat(serverTestInstance.getCheckpoint(), equalTo(clientInstance.getCheckpoint())); assertThat(serverTestInstance.getPosition().getBucketsPosition(), equalTo(clientInstance.getPosition().getBucketsPosition())); assertThat(serverTestInstance.getPosition().getIndexerPosition(), equalTo(clientInstance.getPosition().getIndexerPosition())); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameTransformCheckpointingInfoTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/hlrc/DataFrameTransformCheckpointingInfoTests.java similarity index 67% rename from client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameTransformCheckpointingInfoTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/hlrc/DataFrameTransformCheckpointingInfoTests.java index d9cd95326e4..45db79b8256 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameTransformCheckpointingInfoTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/hlrc/DataFrameTransformCheckpointingInfoTests.java @@ -17,23 +17,23 @@ * under the License. 
*/ -package org.elasticsearch.client.dataframe.transforms.hlrc; +package org.elasticsearch.client.transform.transforms.hlrc; -import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.client.AbstractHlrcXContentTestCase; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpointingInfo; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.transform.transforms.TransformCheckpointingInfo; import java.io.IOException; import java.time.Instant; import java.util.function.Predicate; public class DataFrameTransformCheckpointingInfoTests extends AbstractHlrcXContentTestCase< - DataFrameTransformCheckpointingInfo, - org.elasticsearch.client.dataframe.transforms.DataFrameTransformCheckpointingInfo> { + TransformCheckpointingInfo, + org.elasticsearch.client.transform.transforms.DataFrameTransformCheckpointingInfo> { - public static DataFrameTransformCheckpointingInfo fromHlrc( - org.elasticsearch.client.dataframe.transforms.DataFrameTransformCheckpointingInfo instance) { - return new DataFrameTransformCheckpointingInfo( + public static TransformCheckpointingInfo fromHlrc( + org.elasticsearch.client.transform.transforms.DataFrameTransformCheckpointingInfo instance) { + return new TransformCheckpointingInfo( DataFrameTransformCheckpointStatsTests.fromHlrc(instance.getLast()), DataFrameTransformCheckpointStatsTests.fromHlrc(instance.getNext()), instance.getOperationsBehind(), @@ -41,18 +41,18 @@ public class DataFrameTransformCheckpointingInfoTests extends AbstractHlrcXConte } @Override - public org.elasticsearch.client.dataframe.transforms.DataFrameTransformCheckpointingInfo doHlrcParseInstance(XContentParser parser) { - return org.elasticsearch.client.dataframe.transforms.DataFrameTransformCheckpointingInfo.fromXContent(parser); + public org.elasticsearch.client.transform.transforms.DataFrameTransformCheckpointingInfo doHlrcParseInstance(XContentParser parser) { + return org.elasticsearch.client.transform.transforms.DataFrameTransformCheckpointingInfo.fromXContent(parser); } @Override - public DataFrameTransformCheckpointingInfo convertHlrcToInternal( - org.elasticsearch.client.dataframe.transforms.DataFrameTransformCheckpointingInfo instance) { + public TransformCheckpointingInfo convertHlrcToInternal( + org.elasticsearch.client.transform.transforms.DataFrameTransformCheckpointingInfo instance) { return fromHlrc(instance); } - public static DataFrameTransformCheckpointingInfo randomDataFrameTransformCheckpointingInfo() { - return new DataFrameTransformCheckpointingInfo( + public static TransformCheckpointingInfo randomDataFrameTransformCheckpointingInfo() { + return new TransformCheckpointingInfo( DataFrameTransformCheckpointStatsTests.randomDataFrameTransformCheckpointStats(), DataFrameTransformCheckpointStatsTests.randomDataFrameTransformCheckpointStats(), randomNonNegativeLong(), @@ -60,13 +60,13 @@ public class DataFrameTransformCheckpointingInfoTests extends AbstractHlrcXConte } @Override - protected DataFrameTransformCheckpointingInfo createTestInstance() { + protected TransformCheckpointingInfo createTestInstance() { return randomDataFrameTransformCheckpointingInfo(); } @Override - protected DataFrameTransformCheckpointingInfo doParseInstance(XContentParser parser) throws IOException { - return DataFrameTransformCheckpointingInfo.fromXContent(parser); + protected TransformCheckpointingInfo doParseInstance(XContentParser parser) throws IOException { + return 
TransformCheckpointingInfo.fromXContent(parser); } @Override diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameTransformProgressTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/hlrc/DataFrameTransformProgressTests.java similarity index 70% rename from client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameTransformProgressTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/hlrc/DataFrameTransformProgressTests.java index 83858bb43cf..ff6e797b7dd 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameTransformProgressTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/hlrc/DataFrameTransformProgressTests.java @@ -17,34 +17,34 @@ * under the License. */ -package org.elasticsearch.client.dataframe.transforms.hlrc; +package org.elasticsearch.client.transform.transforms.hlrc; import org.elasticsearch.client.AbstractResponseTestCase; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformProgress; +import org.elasticsearch.xpack.core.transform.transforms.TransformProgress; import static org.hamcrest.Matchers.equalTo; public class DataFrameTransformProgressTests extends AbstractResponseTestCase< - DataFrameTransformProgress, - org.elasticsearch.client.dataframe.transforms.DataFrameTransformProgress> { + TransformProgress, + org.elasticsearch.client.transform.transforms.DataFrameTransformProgress> { - public static DataFrameTransformProgress fromHlrc( - org.elasticsearch.client.dataframe.transforms.DataFrameTransformProgress instance) { + public static TransformProgress fromHlrc( + org.elasticsearch.client.transform.transforms.DataFrameTransformProgress instance) { if (instance == null) { return null; } - return new DataFrameTransformProgress(instance.getTotalDocs(), + return new TransformProgress(instance.getTotalDocs(), instance.getRemainingDocs(), instance.getDocumentsProcessed(), instance.getDocumentsIndexed()); } - public static DataFrameTransformProgress randomDataFrameTransformProgress() { + public static TransformProgress randomDataFrameTransformProgress() { Long totalDocs = randomBoolean() ? null : randomNonNegativeLong(); Long docsRemaining = totalDocs != null ? randomLongBetween(0, totalDocs) : null; - return new DataFrameTransformProgress( + return new TransformProgress( totalDocs, docsRemaining, totalDocs != null ? 
totalDocs - docsRemaining : randomNonNegativeLong(), @@ -52,18 +52,18 @@ public class DataFrameTransformProgressTests extends AbstractResponseTestCase< } @Override - protected DataFrameTransformProgress createServerTestInstance(XContentType xContentType) { + protected TransformProgress createServerTestInstance(XContentType xContentType) { return randomDataFrameTransformProgress(); } @Override - protected org.elasticsearch.client.dataframe.transforms.DataFrameTransformProgress doParseToClientInstance(XContentParser parser) { - return org.elasticsearch.client.dataframe.transforms.DataFrameTransformProgress.fromXContent(parser); + protected org.elasticsearch.client.transform.transforms.DataFrameTransformProgress doParseToClientInstance(XContentParser parser) { + return org.elasticsearch.client.transform.transforms.DataFrameTransformProgress.fromXContent(parser); } @Override - protected void assertInstances(DataFrameTransformProgress serverTestInstance, - org.elasticsearch.client.dataframe.transforms.DataFrameTransformProgress clientInstance) { + protected void assertInstances(TransformProgress serverTestInstance, + org.elasticsearch.client.transform.transforms.DataFrameTransformProgress clientInstance) { assertThat(serverTestInstance.getTotalDocs(), equalTo(clientInstance.getTotalDocs())); assertThat(serverTestInstance.getDocumentsProcessed(), equalTo(clientInstance.getDocumentsProcessed())); assertThat(serverTestInstance.getPercentComplete(), equalTo(clientInstance.getPercentComplete())); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameTransformStatsTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/hlrc/DataFrameTransformStatsTests.java similarity index 66% rename from client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameTransformStatsTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/hlrc/DataFrameTransformStatsTests.java index 42c22cbecc7..f58947be54e 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/hlrc/DataFrameTransformStatsTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/hlrc/DataFrameTransformStatsTests.java @@ -17,16 +17,16 @@ * under the License. 
*/ -package org.elasticsearch.client.dataframe.transforms.hlrc; +package org.elasticsearch.client.transform.transforms.hlrc; import org.elasticsearch.client.AbstractHlrcXContentTestCase; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerTransformStats; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpointStats; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpointingInfo; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformProgress; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformStats; -import org.elasticsearch.xpack.core.dataframe.transforms.NodeAttributes; +import org.elasticsearch.xpack.core.transform.transforms.TransformIndexerStats; +import org.elasticsearch.xpack.core.transform.transforms.TransformCheckpointStats; +import org.elasticsearch.xpack.core.transform.transforms.TransformCheckpointingInfo; +import org.elasticsearch.xpack.core.transform.transforms.TransformProgress; +import org.elasticsearch.xpack.core.transform.transforms.TransformStats; +import org.elasticsearch.xpack.core.transform.transforms.NodeAttributes; import java.io.IOException; import java.time.Instant; @@ -34,10 +34,10 @@ import java.util.HashMap; import java.util.Map; import java.util.function.Predicate; -public class DataFrameTransformStatsTests extends AbstractHlrcXContentTestCase { +public class DataFrameTransformStatsTests extends AbstractHlrcXContentTestCase { - public static NodeAttributes fromHlrc(org.elasticsearch.client.dataframe.transforms.NodeAttributes attributes) { + public static NodeAttributes fromHlrc(org.elasticsearch.client.transform.transforms.NodeAttributes attributes) { return attributes == null ? null : new NodeAttributes(attributes.getId(), attributes.getName(), attributes.getEphemeralId(), @@ -45,11 +45,11 @@ public class DataFrameTransformStatsTests extends AbstractHlrcXContentTestCase field.contains("position") || field.equals("node.attributes"); } - public static DataFrameTransformProgress randomDataFrameTransformProgress() { + public static TransformProgress randomDataFrameTransformProgress() { Long totalDocs = randomBoolean() ? null : randomNonNegativeLong(); Long docsRemaining = totalDocs != null ? randomLongBetween(0, totalDocs) : null; - return new DataFrameTransformProgress( + return new TransformProgress( totalDocs, docsRemaining, totalDocs != null ? totalDocs - docsRemaining : randomNonNegativeLong(), randomBoolean() ? null : randomNonNegativeLong()); } - public static DataFrameTransformCheckpointingInfo randomDataFrameTransformCheckpointingInfo() { - return new DataFrameTransformCheckpointingInfo(randomDataFrameTransformCheckpointStats(), + public static TransformCheckpointingInfo randomDataFrameTransformCheckpointingInfo() { + return new TransformCheckpointingInfo(randomDataFrameTransformCheckpointStats(), randomDataFrameTransformCheckpointStats(), randomNonNegativeLong(), randomBoolean() ? null : Instant.ofEpochMilli(randomNonNegativeLong())); } - public static DataFrameTransformCheckpointStats randomDataFrameTransformCheckpointStats() { - return new DataFrameTransformCheckpointStats(randomLongBetween(1, 1_000_000), + public static TransformCheckpointStats randomDataFrameTransformCheckpointStats() { + return new TransformCheckpointStats(randomLongBetween(1, 1_000_000), DataFrameIndexerPositionTests.randomDataFrameIndexerPosition(), randomBoolean() ? 
null : DataFrameTransformProgressTests.randomDataFrameTransformProgress(), randomLongBetween(1, 1_000_000), randomLongBetween(0, 1_000_000)); @@ -134,8 +134,8 @@ public class DataFrameTransformStatsTests extends AbstractHlrcXContentTestCase { + extends AbstractResponseTestCase { - public static org.elasticsearch.xpack.core.dataframe.transforms.TimeSyncConfig randomTimeSyncConfig() { - return new org.elasticsearch.xpack.core.dataframe.transforms.TimeSyncConfig(randomAlphaOfLengthBetween(1, 10), + public static org.elasticsearch.xpack.core.transform.transforms.TimeSyncConfig randomTimeSyncConfig() { + return new org.elasticsearch.xpack.core.transform.transforms.TimeSyncConfig(randomAlphaOfLengthBetween(1, 10), new TimeValue(randomNonNegativeLong())); } - public static void assertHlrcEquals(org.elasticsearch.xpack.core.dataframe.transforms.TimeSyncConfig serverTestInstance, + public static void assertHlrcEquals(org.elasticsearch.xpack.core.transform.transforms.TimeSyncConfig serverTestInstance, TimeSyncConfig clientInstance) { assertEquals(serverTestInstance.getField(), clientInstance.getField()); assertEquals(serverTestInstance.getDelay(), clientInstance.getDelay()); } @Override - protected org.elasticsearch.xpack.core.dataframe.transforms.TimeSyncConfig createServerTestInstance(XContentType xContentType) { + protected org.elasticsearch.xpack.core.transform.transforms.TimeSyncConfig createServerTestInstance(XContentType xContentType) { return randomTimeSyncConfig(); } @@ -52,7 +52,7 @@ public class TimeSyncConfigTests } @Override - protected void assertInstances(org.elasticsearch.xpack.core.dataframe.transforms.TimeSyncConfig serverTestInstance, + protected void assertInstances(org.elasticsearch.xpack.core.transform.transforms.TimeSyncConfig serverTestInstance, TimeSyncConfig clientInstance) { assertHlrcEquals(serverTestInstance, clientInstance); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/pivot/AggregationConfigTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/pivot/AggregationConfigTests.java similarity index 98% rename from client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/pivot/AggregationConfigTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/pivot/AggregationConfigTests.java index 210dc59329c..a6a1a7ffe4e 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/pivot/AggregationConfigTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/pivot/AggregationConfigTests.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.client.dataframe.transforms.pivot; +package org.elasticsearch.client.transform.transforms.pivot; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.NamedXContentRegistry; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/pivot/DateHistogramGroupSourceTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/pivot/DateHistogramGroupSourceTests.java similarity index 97% rename from client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/pivot/DateHistogramGroupSourceTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/pivot/DateHistogramGroupSourceTests.java index ece1c4fb743..0723bcc8c90 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/pivot/DateHistogramGroupSourceTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/pivot/DateHistogramGroupSourceTests.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.client.dataframe.transforms.pivot; +package org.elasticsearch.client.transform.transforms.pivot; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/pivot/GroupConfigTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/pivot/GroupConfigTests.java similarity index 98% rename from client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/pivot/GroupConfigTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/pivot/GroupConfigTests.java index ab61f6abf48..f20fe098ecb 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/pivot/GroupConfigTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/pivot/GroupConfigTests.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.client.dataframe.transforms.pivot; +package org.elasticsearch.client.transform.transforms.pivot; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.xcontent.DeprecationHandler; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/pivot/HistogramGroupSourceTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/pivot/HistogramGroupSourceTests.java similarity index 96% rename from client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/pivot/HistogramGroupSourceTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/pivot/HistogramGroupSourceTests.java index 18512c047a5..a10f7dde1bf 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/pivot/HistogramGroupSourceTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/pivot/HistogramGroupSourceTests.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.client.dataframe.transforms.pivot; +package org.elasticsearch.client.transform.transforms.pivot; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractXContentTestCase; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/pivot/PivotConfigTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/pivot/PivotConfigTests.java similarity index 97% rename from client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/pivot/PivotConfigTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/pivot/PivotConfigTests.java index 5cafcb9f419..43a24777826 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/pivot/PivotConfigTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/pivot/PivotConfigTests.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.client.dataframe.transforms.pivot; +package org.elasticsearch.client.transform.transforms.pivot; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.NamedXContentRegistry; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/pivot/TermsGroupSourceTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/pivot/TermsGroupSourceTests.java similarity index 96% rename from client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/pivot/TermsGroupSourceTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/pivot/TermsGroupSourceTests.java index 0a71566c22d..fdb264eeb31 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/pivot/TermsGroupSourceTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/pivot/TermsGroupSourceTests.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.client.dataframe.transforms.pivot; +package org.elasticsearch.client.transform.transforms.pivot; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractXContentTestCase; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/pivot/hlrc/DateHistogramGroupSourceTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/pivot/hlrc/DateHistogramGroupSourceTests.java similarity index 87% rename from client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/pivot/hlrc/DateHistogramGroupSourceTests.java rename to client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/pivot/hlrc/DateHistogramGroupSourceTests.java index e9f01ae5278..682cec9c022 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/transforms/pivot/hlrc/DateHistogramGroupSourceTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/transforms/pivot/hlrc/DateHistogramGroupSourceTests.java @@ -17,19 +17,19 @@ * under the License. 
*/ -package org.elasticsearch.client.dataframe.transforms.pivot.hlrc; +package org.elasticsearch.client.transform.transforms.pivot.hlrc; import org.elasticsearch.client.AbstractResponseTestCase; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; -import org.elasticsearch.xpack.core.dataframe.transforms.pivot.DateHistogramGroupSource; +import org.elasticsearch.xpack.core.transform.transforms.pivot.DateHistogramGroupSource; import static org.hamcrest.Matchers.equalTo; public class DateHistogramGroupSourceTests extends AbstractResponseTestCase< DateHistogramGroupSource, - org.elasticsearch.client.dataframe.transforms.pivot.DateHistogramGroupSource> { + org.elasticsearch.client.transform.transforms.pivot.DateHistogramGroupSource> { public static DateHistogramGroupSource randomDateHistogramGroupSource() { String field = randomAlphaOfLengthBetween(1, 20); @@ -54,13 +54,13 @@ public class DateHistogramGroupSourceTests extends AbstractResponseTestCase< } @Override - protected org.elasticsearch.client.dataframe.transforms.pivot.DateHistogramGroupSource doParseToClientInstance(XContentParser parser) { - return org.elasticsearch.client.dataframe.transforms.pivot.DateHistogramGroupSource.fromXContent(parser); + protected org.elasticsearch.client.transform.transforms.pivot.DateHistogramGroupSource doParseToClientInstance(XContentParser parser) { + return org.elasticsearch.client.transform.transforms.pivot.DateHistogramGroupSource.fromXContent(parser); } @Override protected void assertInstances(DateHistogramGroupSource serverTestInstance, - org.elasticsearch.client.dataframe.transforms.pivot.DateHistogramGroupSource clientInstance) { + org.elasticsearch.client.transform.transforms.pivot.DateHistogramGroupSource clientInstance) { assertThat(serverTestInstance.getField(), equalTo(clientInstance.getField())); assertSameInterval(serverTestInstance.getInterval(), clientInstance.getInterval()); assertThat(serverTestInstance.getTimeZone(), equalTo(clientInstance.getTimeZone())); @@ -68,7 +68,7 @@ public class DateHistogramGroupSourceTests extends AbstractResponseTestCase< } private void assertSameInterval(DateHistogramGroupSource.Interval serverTestInstance, - org.elasticsearch.client.dataframe.transforms.pivot.DateHistogramGroupSource.Interval clientInstance) { + org.elasticsearch.client.transform.transforms.pivot.DateHistogramGroupSource.Interval clientInstance) { assertEquals(serverTestInstance.getName(), clientInstance.getName()); assertEquals(serverTestInstance.getInterval(), clientInstance.getInterval()); } diff --git a/client/rest/licenses/httpclient-4.5.10.jar.sha1 b/client/rest/licenses/httpclient-4.5.10.jar.sha1 new file mode 100644 index 00000000000..b708efd0dd5 --- /dev/null +++ b/client/rest/licenses/httpclient-4.5.10.jar.sha1 @@ -0,0 +1 @@ +7ca2e4276f4ef95e4db725a8cd4a1d1e7585b9e5 \ No newline at end of file diff --git a/client/rest/licenses/httpclient-4.5.8.jar.sha1 b/client/rest/licenses/httpclient-4.5.8.jar.sha1 deleted file mode 100644 index 73f0d30c709..00000000000 --- a/client/rest/licenses/httpclient-4.5.8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c27c9d6f15435dc2b6947112027b418b0eef32b9 \ No newline at end of file diff --git a/client/rest/licenses/httpcore-4.4.11.jar.sha1 b/client/rest/licenses/httpcore-4.4.11.jar.sha1 deleted file mode 100644 index 6d64372bfcc..00000000000 --- a/client/rest/licenses/httpcore-4.4.11.jar.sha1 +++ /dev/null @@ -1 +0,0 
@@ -de748cf874e4e193b42eceea9fe5574fabb9d4df \ No newline at end of file diff --git a/client/rest/licenses/httpcore-4.4.12.jar.sha1 b/client/rest/licenses/httpcore-4.4.12.jar.sha1 new file mode 100644 index 00000000000..3c046171b30 --- /dev/null +++ b/client/rest/licenses/httpcore-4.4.12.jar.sha1 @@ -0,0 +1 @@ +21ebaf6d532bc350ba95bd81938fa5f0e511c132 \ No newline at end of file diff --git a/client/rest/licenses/httpcore-nio-4.4.11.jar.sha1 b/client/rest/licenses/httpcore-nio-4.4.11.jar.sha1 deleted file mode 100644 index 9e8777cb3da..00000000000 --- a/client/rest/licenses/httpcore-nio-4.4.11.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7d0a97d01d39cff9aa3e6db81f21fddb2435f4e6 \ No newline at end of file diff --git a/client/rest/licenses/httpcore-nio-4.4.12.jar.sha1 b/client/rest/licenses/httpcore-nio-4.4.12.jar.sha1 new file mode 100644 index 00000000000..4de932dc5ac --- /dev/null +++ b/client/rest/licenses/httpcore-nio-4.4.12.jar.sha1 @@ -0,0 +1 @@ +84cd29eca842f31db02987cfedea245af020198b \ No newline at end of file diff --git a/client/rest/src/main/java/org/elasticsearch/client/Cancellable.java b/client/rest/src/main/java/org/elasticsearch/client/Cancellable.java new file mode 100644 index 00000000000..6a31ab3fe17 --- /dev/null +++ b/client/rest/src/main/java/org/elasticsearch/client/Cancellable.java @@ -0,0 +1,87 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client; + +import org.apache.http.client.methods.AbstractExecutionAwareRequest; +import org.apache.http.client.methods.HttpRequestBase; + +import java.util.concurrent.CancellationException; + +/** + * Represents an operation that can be cancelled. + * Returned when executing async requests through {@link RestClient#performRequestAsync(Request, ResponseListener)}, so that the request + * can be cancelled if needed. Cancelling a request will result in calling {@link AbstractExecutionAwareRequest#abort()} on the underlying + * request object, which will in turn cancel its corresponding {@link java.util.concurrent.Future}. + * Note that cancelling a request does not automatically translate to aborting its execution on the server side, which needs to be + * specifically implemented in each API. 
+ */ +public class Cancellable { + + static final Cancellable NO_OP = new Cancellable(null) { + @Override + public void cancel() { + } + + @Override + void runIfNotCancelled(Runnable runnable) { + throw new UnsupportedOperationException(); + } + }; + + static Cancellable fromRequest(HttpRequestBase httpRequest) { + return new Cancellable(httpRequest); + } + + private final HttpRequestBase httpRequest; + + private Cancellable(HttpRequestBase httpRequest) { + this.httpRequest = httpRequest; + } + + /** + * Cancels the on-going request that is associated with the current instance of {@link Cancellable}. + * + */ + public synchronized void cancel() { + this.httpRequest.abort(); + } + + /** + * Executes some arbitrary code iff the on-going request has not been cancelled, otherwise throws {@link CancellationException}. + * This is needed to guarantee that cancelling a request works correctly even in case {@link #cancel()} is called between different + * attempts of the same request. The low-level client reuses the same instance of the {@link AbstractExecutionAwareRequest} by calling + * {@link AbstractExecutionAwareRequest#reset()} between subsequent retries. The {@link #cancel()} method can be called at any time, + * and we need to handle the case where it gets called while there is no request being executed as one attempt may have failed and + * the subsequent attempt has not been started yet. + * If the request has already been cancelled we don't go ahead with the next attempt, and artificially raise the + * {@link CancellationException}, otherwise we run the provided {@link Runnable} which will reset the request and send the next attempt. + * Note that this method must be synchronized as well as the {@link #cancel()} method, to prevent a request from being cancelled + * when there is no future to cancel, which would make cancelling the request a no-op.
+ */ + synchronized void runIfNotCancelled(Runnable runnable) { + if (this.httpRequest.isAborted()) { + throw newCancellationException(); + } + runnable.run(); + } + + static CancellationException newCancellationException() { + return new CancellationException("request was cancelled"); + } +} diff --git a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java index 38185ac9609..a31732d7427 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java @@ -277,60 +277,64 @@ public class RestClient implements Closeable { * @param responseListener the {@link ResponseListener} to notify when the * request is completed or fails */ - public void performRequestAsync(Request request, ResponseListener responseListener) { + public Cancellable performRequestAsync(Request request, ResponseListener responseListener) { try { FailureTrackingResponseListener failureTrackingResponseListener = new FailureTrackingResponseListener(responseListener); InternalRequest internalRequest = new InternalRequest(request); performRequestAsync(nextNodes(), internalRequest, failureTrackingResponseListener); + return internalRequest.cancellable; } catch (Exception e) { responseListener.onFailure(e); + return Cancellable.NO_OP; } } private void performRequestAsync(final NodeTuple> nodeTuple, final InternalRequest request, final FailureTrackingResponseListener listener) { - final RequestContext context = request.createContextForNextAttempt(nodeTuple.nodes.next(), nodeTuple.authCache); - client.execute(context.requestProducer, context.asyncResponseConsumer, context.context, new FutureCallback() { - @Override - public void completed(HttpResponse httpResponse) { - try { - ResponseOrResponseException responseOrResponseException = convertResponse(request, context.node, httpResponse); - if (responseOrResponseException.responseException == null) { - listener.onSuccess(responseOrResponseException.response); - } else { + request.cancellable.runIfNotCancelled(() -> { + final RequestContext context = request.createContextForNextAttempt(nodeTuple.nodes.next(), nodeTuple.authCache); + client.execute(context.requestProducer, context.asyncResponseConsumer, context.context, new FutureCallback() { + @Override + public void completed(HttpResponse httpResponse) { + try { + ResponseOrResponseException responseOrResponseException = convertResponse(request, context.node, httpResponse); + if (responseOrResponseException.responseException == null) { + listener.onSuccess(responseOrResponseException.response); + } else { + if (nodeTuple.nodes.hasNext()) { + listener.trackFailure(responseOrResponseException.responseException); + performRequestAsync(nodeTuple, request, listener); + } else { + listener.onDefinitiveFailure(responseOrResponseException.responseException); + } + } + } catch(Exception e) { + listener.onDefinitiveFailure(e); + } + } + + @Override + public void failed(Exception failure) { + try { + RequestLogger.logFailedRequest(logger, request.httpRequest, context.node, failure); + onFailure(context.node); if (nodeTuple.nodes.hasNext()) { - listener.trackFailure(responseOrResponseException.responseException); + listener.trackFailure(failure); performRequestAsync(nodeTuple, request, listener); } else { - listener.onDefinitiveFailure(responseOrResponseException.responseException); + listener.onDefinitiveFailure(failure); } + } catch(Exception e) { + 
listener.onDefinitiveFailure(e); } - } catch(Exception e) { - listener.onDefinitiveFailure(e); } - } - @Override - public void failed(Exception failure) { - try { - RequestLogger.logFailedRequest(logger, request.httpRequest, context.node, failure); - onFailure(context.node); - if (nodeTuple.nodes.hasNext()) { - listener.trackFailure(failure); - performRequestAsync(nodeTuple, request, listener); - } else { - listener.onDefinitiveFailure(failure); - } - } catch(Exception e) { - listener.onDefinitiveFailure(e); + @Override + public void cancelled() { + listener.onDefinitiveFailure(Cancellable.newCancellationException()); } - } - - @Override - public void cancelled() { - listener.onDefinitiveFailure(new ExecutionException("request was cancelled", null)); - } + }); }); } @@ -651,19 +655,20 @@ public class RestClient implements Closeable { private class InternalRequest { private final Request request; - private final Map params; private final Set ignoreErrorCodes; private final HttpRequestBase httpRequest; + private final Cancellable cancellable; private final WarningsHandler warningsHandler; InternalRequest(Request request) { this.request = request; - this.params = new HashMap<>(request.getParameters()); + Map params = new HashMap<>(request.getParameters()); //ignore is a special parameter supported by the clients, shouldn't be sent to es String ignoreString = params.remove("ignore"); this.ignoreErrorCodes = getIgnoreErrorCodes(ignoreString, request.getMethod()); URI uri = buildUri(pathPrefix, request.getEndpoint(), params); this.httpRequest = createHttpRequest(request.getMethod(), uri, request.getEntity()); + this.cancellable = Cancellable.fromRequest(httpRequest); setHeaders(httpRequest, request.getOptions().getHeaders()); this.warningsHandler = request.getOptions().getWarningsHandler() == null ? 
RestClient.this.warningsHandler : request.getOptions().getWarningsHandler(); diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsIntegTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsIntegTests.java index 4cc16c45bab..9a893fa58a2 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsIntegTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsIntegTests.java @@ -35,6 +35,7 @@ import java.net.InetSocketAddress; import java.util.ArrayList; import java.util.Iterator; import java.util.List; +import java.util.concurrent.CancellationException; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; @@ -42,7 +43,9 @@ import java.util.concurrent.TimeUnit; import static org.elasticsearch.client.RestClientTestUtil.getAllStatusCodes; import static org.elasticsearch.client.RestClientTestUtil.randomErrorNoRetryStatusCode; import static org.elasticsearch.client.RestClientTestUtil.randomOkStatusCode; +import static org.hamcrest.CoreMatchers.instanceOf; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; @@ -52,6 +55,7 @@ import static org.junit.Assert.fail; */ public class RestClientMultipleHostsIntegTests extends RestClientTestCase { + private static WaitForCancelHandler waitForCancelHandler; private static HttpServer[] httpServers; private static HttpHost[] httpHosts; private static boolean stoppedFirstHost = false; @@ -70,6 +74,7 @@ public class RestClientMultipleHostsIntegTests extends RestClientTestCase { int numHttpServers = randomIntBetween(2, 4); httpServers = new HttpServer[numHttpServers]; httpHosts = new HttpHost[numHttpServers]; + waitForCancelHandler = new WaitForCancelHandler(); for (int i = 0; i < numHttpServers; i++) { HttpServer httpServer = createHttpServer(); httpServers[i] = httpServer; @@ -94,9 +99,40 @@ public class RestClientMultipleHostsIntegTests extends RestClientTestCase { for (int statusCode : getAllStatusCodes()) { httpServer.createContext(pathPrefix + "/" + statusCode, new ResponseHandler(statusCode)); } + httpServer.createContext(pathPrefix + "/wait", waitForCancelHandler); return httpServer; } + private static class WaitForCancelHandler implements HttpHandler { + private volatile CountDownLatch requestCameInLatch; + private volatile CountDownLatch cancelHandlerLatch; + + void reset() { + cancelHandlerLatch = new CountDownLatch(1); + requestCameInLatch = new CountDownLatch(1); + } + + void cancelDone() { + cancelHandlerLatch.countDown(); + } + + void awaitRequest() throws InterruptedException { + requestCameInLatch.await(); + } + + @Override + public void handle(HttpExchange exchange) throws IOException { + requestCameInLatch.countDown(); + try { + cancelHandlerLatch.await(); + } catch (InterruptedException ignore) { + } finally { + exchange.sendResponseHeaders(200, 0); + exchange.close(); + } + } + } + private static class ResponseHandler implements HttpHandler { private final int statusCode; @@ -127,7 +163,7 @@ public class RestClientMultipleHostsIntegTests extends RestClientTestCase { //verify that shutting down some hosts doesn't matter as long as one working host is left behind if (httpServers.length > 1 && randomBoolean()) { List updatedHttpServers = new ArrayList<>(httpServers.length - 1); - int nodeIndex = randomInt(httpServers.length - 
1); + int nodeIndex = randomIntBetween(0, httpServers.length - 1); if (0 == nodeIndex) { stoppedFirstHost = true; } @@ -139,7 +175,7 @@ public class RestClientMultipleHostsIntegTests extends RestClientTestCase { updatedHttpServers.add(httpServer); } } - httpServers = updatedHttpServers.toArray(new HttpServer[updatedHttpServers.size()]); + httpServers = updatedHttpServers.toArray(new HttpServer[0]); } } @@ -195,6 +231,42 @@ public class RestClientMultipleHostsIntegTests extends RestClientTestCase { } } + public void testCancelAsyncRequests() throws Exception { + int numRequests = randomIntBetween(5, 20); + final List responses = new CopyOnWriteArrayList<>(); + final List exceptions = new CopyOnWriteArrayList<>(); + for (int i = 0; i < numRequests; i++) { + CountDownLatch latch = new CountDownLatch(1); + waitForCancelHandler.reset(); + Cancellable cancellable = restClient.performRequestAsync(new Request("GET", "/wait"), new ResponseListener() { + @Override + public void onSuccess(Response response) { + responses.add(response); + latch.countDown(); + } + + @Override + public void onFailure(Exception exception) { + exceptions.add(exception); + latch.countDown(); + } + }); + if (randomBoolean()) { + //we wait for the request to get to the server-side otherwise we almost always cancel + // the request artificially on the client-side before even sending it + waitForCancelHandler.awaitRequest(); + } + cancellable.cancel(); + waitForCancelHandler.cancelDone(); + assertTrue(latch.await(5, TimeUnit.SECONDS)); + } + assertEquals(0, responses.size()); + assertEquals(numRequests, exceptions.size()); + for (Exception exception : exceptions) { + assertThat(exception, instanceOf(CancellationException.class)); + } + } + /** * Test host selector against a real server and * test what happens after calling @@ -249,13 +321,10 @@ public class RestClientMultipleHostsIntegTests extends RestClientTestCase { } private NodeSelector firstPositionNodeSelector() { - return new NodeSelector() { - @Override - public void select(Iterable nodes) { - for (Iterator itr = nodes.iterator(); itr.hasNext();) { - if (httpHosts[0] != itr.next().getHost()) { - itr.remove(); - } + return nodes -> { + for (Iterator itr = nodes.iterator(); itr.hasNext();) { + if (httpHosts[0] != itr.next().getHost()) { + itr.remove(); } } }; diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java index f3df9bf3bfd..21d4e9d0e81 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java @@ -243,19 +243,16 @@ public class RestClientMultipleHostsTests extends RestClientTestCase { } public void testNodeSelector() throws Exception { - NodeSelector firstPositionOnly = new NodeSelector() { - @Override - public void select(Iterable restClientNodes) { - boolean found = false; - for (Iterator itr = restClientNodes.iterator(); itr.hasNext();) { - if (nodes.get(0) == itr.next()) { - found = true; - } else { - itr.remove(); - } + NodeSelector firstPositionOnly = restClientNodes -> { + boolean found = false; + for (Iterator itr = restClientNodes.iterator(); itr.hasNext();) { + if (nodes.get(0) == itr.next()) { + found = true; + } else { + itr.remove(); } - assertTrue(found); } + assertTrue(found); }; RestClient restClient = createRestClient(firstPositionOnly); int rounds = between(1, 10); diff --git 
a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java index e3fd3c31137..9c7c777fe14 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java @@ -26,11 +26,15 @@ import com.sun.net.httpserver.HttpServer; import org.apache.http.Consts; import org.apache.http.Header; import org.apache.http.HttpHost; +import org.apache.http.HttpResponse; import org.apache.http.auth.AuthScope; import org.apache.http.auth.UsernamePasswordCredentials; +import org.apache.http.client.methods.HttpGet; +import org.apache.http.client.methods.HttpRequestBase; import org.apache.http.entity.ContentType; import org.apache.http.impl.client.BasicCredentialsProvider; import org.apache.http.impl.client.TargetAuthenticationStrategy; +import org.apache.http.impl.nio.client.CloseableHttpAsyncClient; import org.apache.http.impl.nio.client.HttpAsyncClientBuilder; import org.apache.http.message.BasicHeader; import org.apache.http.nio.entity.NStringEntity; @@ -49,16 +53,22 @@ import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.CancellationException; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; import static org.elasticsearch.client.RestClientTestUtil.getAllStatusCodes; import static org.elasticsearch.client.RestClientTestUtil.getHttpMethods; +import static org.elasticsearch.client.RestClientTestUtil.randomHttpMethod; import static org.elasticsearch.client.RestClientTestUtil.randomStatusCode; +import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.Matchers.startsWith; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; @@ -73,6 +83,7 @@ public class RestClientSingleHostIntegTests extends RestClientTestCase { private RestClient restClient; private String pathPrefix; private Header[] defaultHeaders; + private WaitForCancelHandler waitForCancelHandler; @Before public void startHttpServer() throws Exception { @@ -89,9 +100,31 @@ public class RestClientSingleHostIntegTests extends RestClientTestCase { for (int statusCode : getAllStatusCodes()) { httpServer.createContext(pathPrefix + "/" + statusCode, new ResponseHandler(statusCode)); } + waitForCancelHandler = new WaitForCancelHandler(); + httpServer.createContext(pathPrefix + "/wait", waitForCancelHandler); return httpServer; } + private static class WaitForCancelHandler implements HttpHandler { + + private final CountDownLatch cancelHandlerLatch = new CountDownLatch(1); + + void cancelDone() { + cancelHandlerLatch.countDown(); + } + + @Override + public void handle(HttpExchange exchange) throws IOException { + try { + cancelHandlerLatch.await(); + } catch (InterruptedException ignore) { + } finally { + exchange.sendResponseHeaders(200, 0); + exchange.close(); + } + } + } + private static class ResponseHandler implements HttpHandler { private final int statusCode; @@ -201,6 +234,82 @@ public class RestClientSingleHostIntegTests extends RestClientTestCase { } } + 
public void testCancelAsyncRequest() throws Exception { + Request request = new Request(randomHttpMethod(getRandom()), "/wait"); + CountDownLatch requestLatch = new CountDownLatch(1); + AtomicReference error = new AtomicReference<>(); + Cancellable cancellable = restClient.performRequestAsync(request, new ResponseListener() { + @Override + public void onSuccess(Response response) { + throw new AssertionError("onResponse called unexpectedly"); + } + + @Override + public void onFailure(Exception exception) { + error.set(exception); + requestLatch.countDown(); + } + }); + cancellable.cancel(); + waitForCancelHandler.cancelDone(); + assertTrue(requestLatch.await(5, TimeUnit.SECONDS)); + assertThat(error.get(), instanceOf(CancellationException.class)); + } + + /** + * This test verifies some assumptions that we rely upon around the way the async http client works when reusing the same request + * throughout multiple retries, and the use of the {@link HttpRequestBase#abort()} method. + * In fact the low-level REST client reuses the same request instance throughout multiple retries, and relies on the http client + * to set the future ref to the request properly so that when abort is called, the proper future gets cancelled. + */ + public void testRequestResetAndAbort() throws Exception { + try (CloseableHttpAsyncClient client = HttpAsyncClientBuilder.create().build()) { + client.start(); + HttpHost httpHost = new HttpHost(httpServer.getAddress().getHostString(), httpServer.getAddress().getPort()); + HttpGet httpGet = new HttpGet(pathPrefix + "/200"); + + //calling abort before the request is sent is a no-op + httpGet.abort(); + assertTrue(httpGet.isAborted()); + + { + httpGet.reset(); + assertFalse(httpGet.isAborted()); + httpGet.abort(); + Future future = client.execute(httpHost, httpGet, null); + try { + future.get(); + fail("expected cancellation exception"); + } catch(CancellationException e) { + //expected + } + assertTrue(future.isCancelled()); + } + { + httpGet.reset(); + Future future = client.execute(httpHost, httpGet, null); + assertFalse(httpGet.isAborted()); + httpGet.abort(); + assertTrue(httpGet.isAborted()); + try { + assertTrue(future.isCancelled()); + future.get(); + throw new AssertionError("exception should have been thrown"); + } catch(CancellationException e) { + //expected + } + } + { + httpGet.reset(); + assertFalse(httpGet.isAborted()); + Future future = client.execute(httpHost, httpGet, null); + assertFalse(httpGet.isAborted()); + assertEquals(200, future.get().getStatusLine().getStatusCode()); + assertFalse(future.isCancelled()); + } + } + } + /** * End to end test for headers. We test it explicitly against a real http client as there are different ways * to set/add headers to the {@link org.apache.http.client.HttpClient}. 
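For reference, a minimal caller-side sketch of the cancellation support exercised by these tests, assuming a previously built low-level `RestClient` named `restClient`; the endpoint and helper method name are illustrative only:

["source","java"]
--------------------------------------------------
import org.elasticsearch.client.Cancellable;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.ResponseListener;
import org.elasticsearch.client.RestClient;

import java.util.concurrent.CancellationException;

class CancellableUsageSketch {
    static void searchAndMaybeCancel(RestClient restClient) {
        Request request = new Request("GET", "/posts/_search");
        // performRequestAsync now hands back a Cancellable tied to the underlying http request
        Cancellable cancellable = restClient.performRequestAsync(request, new ResponseListener() {
            @Override
            public void onSuccess(Response response) {
                // handle the response
            }

            @Override
            public void onFailure(Exception exception) {
                // a cancelled request is reported here as a CancellationException
                if (exception instanceof CancellationException) {
                    // the request was aborted on the client side
                }
            }
        });
        // abort the in-flight request (calls abort() on the underlying Apache http request)
        cancellable.cancel();
    }
}
--------------------------------------------------

As the `Cancellable` javadoc notes, cancelling only aborts the client-side execution; cancelling the corresponding server-side task has to be implemented per API.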
@@ -356,7 +465,6 @@ public class RestClientSingleHostIntegTests extends RestClientTestCase { assertThat(response200.getHeader("Authorization"), startsWith("Basic")); } } - } public void testUrlWithoutLeadingSlash() throws Exception { diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java index 3894fca7d0a..dd133f90daa 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java @@ -52,7 +52,6 @@ import org.apache.http.util.EntityUtils; import org.junit.After; import org.junit.Before; import org.mockito.ArgumentCaptor; -import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; import javax.net.ssl.SSLHandshakeException; @@ -68,7 +67,6 @@ import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Set; -import java.util.concurrent.Callable; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; @@ -126,30 +124,24 @@ public class RestClientSingleHostTests extends RestClientTestCase { static CloseableHttpAsyncClient mockHttpClient(final ExecutorService exec) { CloseableHttpAsyncClient httpClient = mock(CloseableHttpAsyncClient.class); when(httpClient.execute(any(HttpAsyncRequestProducer.class), any(HttpAsyncResponseConsumer.class), - any(HttpClientContext.class), any(FutureCallback.class))).thenAnswer(new Answer>() { - @Override - public Future answer(InvocationOnMock invocationOnMock) throws Throwable { + any(HttpClientContext.class), any(FutureCallback.class))).thenAnswer((Answer>) invocationOnMock -> { final HttpAsyncRequestProducer requestProducer = (HttpAsyncRequestProducer) invocationOnMock.getArguments()[0]; final FutureCallback futureCallback = (FutureCallback) invocationOnMock.getArguments()[3]; // Call the callback asynchronous to better simulate how async http client works - return exec.submit(new Callable() { - @Override - public HttpResponse call() throws Exception { - if (futureCallback != null) { - try { - HttpResponse httpResponse = responseOrException(requestProducer); - futureCallback.completed(httpResponse); - } catch(Exception e) { - futureCallback.failed(e); - } - return null; + return exec.submit(() -> { + if (futureCallback != null) { + try { + HttpResponse httpResponse = responseOrException(requestProducer); + futureCallback.completed(httpResponse); + } catch(Exception e) { + futureCallback.failed(e); } - return responseOrException(requestProducer); + return null; } + return responseOrException(requestProducer); }); - } - }); + }); return httpClient; } diff --git a/client/rest/src/test/java/org/elasticsearch/client/documentation/RestClientDocumentation.java b/client/rest/src/test/java/org/elasticsearch/client/documentation/RestClientDocumentation.java index 8653db4226f..7ade990e1f8 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/documentation/RestClientDocumentation.java +++ b/client/rest/src/test/java/org/elasticsearch/client/documentation/RestClientDocumentation.java @@ -36,6 +36,7 @@ import org.apache.http.nio.entity.NStringEntity; import org.apache.http.ssl.SSLContextBuilder; import org.apache.http.ssl.SSLContexts; import org.apache.http.util.EntityUtils; +import org.elasticsearch.client.Cancellable; import org.elasticsearch.client.HttpAsyncResponseConsumerFactory; import 
org.elasticsearch.client.Node; import org.elasticsearch.client.NodeSelector; @@ -206,16 +207,17 @@ public class RestClientDocumentation { Request request = new Request( "GET", // <1> "/"); // <2> - restClient.performRequestAsync(request, new ResponseListener() { - @Override - public void onSuccess(Response response) { - // <3> - } + Cancellable cancellable = restClient.performRequestAsync(request, + new ResponseListener() { + @Override + public void onSuccess(Response response) { + // <3> + } - @Override - public void onFailure(Exception exception) { - // <4> - } + @Override + public void onFailure(Exception exception) { + // <4> + } }); //end::rest-client-async } @@ -271,6 +273,26 @@ public class RestClientDocumentation { latch.await(); //end::rest-client-async-example } + { + //tag::rest-client-async-cancel + Request request = new Request("GET", "/posts/_search"); + Cancellable cancellable = restClient.performRequestAsync( + request, + new ResponseListener() { + @Override + public void onSuccess(Response response) { + // <1> + } + + @Override + public void onFailure(Exception exception) { + // <2> + } + } + ); + cancellable.cancel(); + //end::rest-client-async-cancel + } { //tag::rest-client-response2 Response response = restClient.performRequest(new Request("GET", "/")); diff --git a/client/sniffer/licenses/httpclient-4.5.10.jar.sha1 b/client/sniffer/licenses/httpclient-4.5.10.jar.sha1 new file mode 100644 index 00000000000..b708efd0dd5 --- /dev/null +++ b/client/sniffer/licenses/httpclient-4.5.10.jar.sha1 @@ -0,0 +1 @@ +7ca2e4276f4ef95e4db725a8cd4a1d1e7585b9e5 \ No newline at end of file diff --git a/client/sniffer/licenses/httpclient-4.5.8.jar.sha1 b/client/sniffer/licenses/httpclient-4.5.8.jar.sha1 deleted file mode 100644 index 73f0d30c709..00000000000 --- a/client/sniffer/licenses/httpclient-4.5.8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c27c9d6f15435dc2b6947112027b418b0eef32b9 \ No newline at end of file diff --git a/client/sniffer/licenses/httpcore-4.4.11.jar.sha1 b/client/sniffer/licenses/httpcore-4.4.11.jar.sha1 deleted file mode 100644 index 6d64372bfcc..00000000000 --- a/client/sniffer/licenses/httpcore-4.4.11.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -de748cf874e4e193b42eceea9fe5574fabb9d4df \ No newline at end of file diff --git a/client/sniffer/licenses/httpcore-4.4.12.jar.sha1 b/client/sniffer/licenses/httpcore-4.4.12.jar.sha1 new file mode 100644 index 00000000000..3c046171b30 --- /dev/null +++ b/client/sniffer/licenses/httpcore-4.4.12.jar.sha1 @@ -0,0 +1 @@ +21ebaf6d532bc350ba95bd81938fa5f0e511c132 \ No newline at end of file diff --git a/distribution/archives/build.gradle b/distribution/archives/build.gradle index ad4c77aaa5e..f3905eae4b7 100644 --- a/distribution/archives/build.gradle +++ b/distribution/archives/build.gradle @@ -24,6 +24,7 @@ import org.elasticsearch.gradle.LoggedExec import org.elasticsearch.gradle.MavenFilteringHack import org.elasticsearch.gradle.VersionProperties import org.elasticsearch.gradle.plugin.PluginBuildPlugin +import org.elasticsearch.gradle.tar.SymbolicLinkPreservingTar import java.nio.file.Files import java.nio.file.Path @@ -141,49 +142,49 @@ Closure commonTarConfig = { fileMode 0644 } -task buildDarwinTar(type: Tar) { +task buildDarwinTar(type: SymbolicLinkPreservingTar) { configure(commonTarConfig) archiveClassifier = 'darwin-x86_64' with archiveFiles(modulesFiles(false, 'darwin'), 'tar', 'darwin', false, true) } -task buildOssDarwinTar(type: Tar) { +task buildOssDarwinTar(type: SymbolicLinkPreservingTar) { configure(commonTarConfig) 
archiveClassifier = 'darwin-x86_64' with archiveFiles(modulesFiles(true, 'darwin'), 'tar', 'darwin', true, true) } -task buildNoJdkDarwinTar(type: Tar) { +task buildNoJdkDarwinTar(type: SymbolicLinkPreservingTar) { configure(commonTarConfig) archiveClassifier = 'no-jdk-darwin-x86_64' with archiveFiles(modulesFiles(false, 'darwin'), 'tar', 'darwin', false, false) } -task buildOssNoJdkDarwinTar(type: Tar) { +task buildOssNoJdkDarwinTar(type: SymbolicLinkPreservingTar) { configure(commonTarConfig) archiveClassifier = 'no-jdk-darwin-x86_64' with archiveFiles(modulesFiles(true, 'darwin'), 'tar', 'darwin', true, false) } -task buildLinuxTar(type: Tar) { +task buildLinuxTar(type: SymbolicLinkPreservingTar) { configure(commonTarConfig) archiveClassifier = 'linux-x86_64' with archiveFiles(modulesFiles(false, 'linux'), 'tar', 'linux', false, true) } -task buildOssLinuxTar(type: Tar) { +task buildOssLinuxTar(type: SymbolicLinkPreservingTar) { configure(commonTarConfig) archiveClassifier = 'linux-x86_64' with archiveFiles(modulesFiles(true, 'linux'), 'tar', 'linux', true, true) } -task buildNoJdkLinuxTar(type: Tar) { +task buildNoJdkLinuxTar(type: SymbolicLinkPreservingTar) { configure(commonTarConfig) archiveClassifier = 'no-jdk-linux-x86_64' with archiveFiles(modulesFiles(false, 'linux'), 'tar', 'linux', false, false) } -task buildOssNoJdkLinuxTar(type: Tar) { +task buildOssNoJdkLinuxTar(type: SymbolicLinkPreservingTar) { configure(commonTarConfig) archiveClassifier = 'no-jdk-linux-x86_64' with archiveFiles(modulesFiles(true, 'linux'), 'tar', 'linux', true, false) @@ -375,4 +376,3 @@ configure(subprojects.findAll { it.name == 'integ-test-zip' }) { } } } - diff --git a/distribution/build.gradle b/distribution/build.gradle index d71038f36c7..3b5777fc71b 100644 --- a/distribution/build.gradle +++ b/distribution/build.gradle @@ -26,7 +26,8 @@ import org.elasticsearch.gradle.VersionProperties import org.elasticsearch.gradle.test.RunTask import java.nio.file.Files -import java.nio.file.Path +import java.nio.file.Path + /***************************************************************************** * Third party dependencies report * *****************************************************************************/ @@ -384,11 +385,12 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) { } } - jdkFiles = { project, platform -> + jdkFiles = { Project project, String platform -> project.jdks { "bundled_${platform}" { it.platform = platform it.version = VersionProperties.bundledJdk + it.vendor = VersionProperties.bundledJdkVendor } } return copySpec { diff --git a/distribution/bwc/build.gradle b/distribution/bwc/build.gradle index aa35598c9cd..3642efa7746 100644 --- a/distribution/bwc/build.gradle +++ b/distribution/bwc/build.gradle @@ -105,7 +105,7 @@ bwcVersions.forPreviousUnreleased { BwcVersions.UnreleasedVersionInfo unreleased dependsOn fetchLatest doLast { String refspec = System.getProperty("bwc.refspec.${bwcBranch}") ?: System.getProperty("tests.bwc.refspec.${bwcBranch}") ?: "${remote}/${bwcBranch}" - if (System.getProperty("bwc.checkout.align") != null || System.getProperty("tests.bwc.checkout.align") != null) { + if (System.getProperty("bwc.checkout.align") != null) { /* We use a time based approach to make the bwc versions built deterministic and compatible with the current hash. 
Most of the time we want to test against latest, but when running delayed exhaustive tests or wanting @@ -141,12 +141,14 @@ bwcVersions.forPreviousUnreleased { BwcVersions.UnreleasedVersionInfo unreleased } } - logger.lifecycle("Checkout hash for ${project.path} is ${refspec}") + logger.lifecycle("Performing checkout of ${refspec}...") LoggedExec.exec(project) { spec -> spec.workingDir = checkoutDir spec.commandLine "git", "checkout", refspec } - file("${project.buildDir}/refspec").text = GlobalBuildInfoPlugin.gitRevision(checkoutDir) + String checkoutHash = GlobalBuildInfoPlugin.gitRevision(checkoutDir) + logger.lifecycle("Checkout hash for ${project.path} is ${checkoutHash}") + file("${project.buildDir}/refspec").text = checkoutHash } } @@ -221,8 +223,14 @@ bwcVersions.forPreviousUnreleased { BwcVersions.UnreleasedVersionInfo unreleased Task bwcTask = createRunBwcGradleTask(buildBwcTaskName(projectName)) { inputs.file("${project.buildDir}/refspec") outputs.files(projectArtifact) - outputs.cacheIf { true } + outputs.cacheIf("BWC distribution caching is disabled on 'master' branch") { + // Don't bother caching in 'master' since the BWC branches move too quickly to make this cost worthwhile + project.ext.isCi && System.getenv('GIT_BRANCH')?.endsWith("master") == false + } args ":${projectDir.replace('/', ':')}:assemble" + if (project.gradle.startParameter.buildCacheEnabled) { + args "--build-cache" + } doLast { if (projectArtifact.exists() == false) { throw new InvalidUserDataException("Building ${bwcVersion} didn't generate expected file ${projectArtifact}") diff --git a/docs/Versions.asciidoc b/docs/Versions.asciidoc index c1b188d721d..6a1c7eda521 100644 --- a/docs/Versions.asciidoc +++ b/docs/Versions.asciidoc @@ -1,24 +1,13 @@ -:version: 7.5.0 -//// -bare_version never includes -alpha or -beta -//// -:bare_version: 7.5.0 -:major-version: 7.x -:prev-major-version: 6.x + +include::{asciidoc-dir}/../../shared/versions/stack/{source_branch}.asciidoc[] + :lucene_version: 8.2.0 :lucene_version_path: 8_2_0 -:branch: 7.x :jdk: 1.8.0_131 :jdk_major: 8 :build_flavor: default :build_type: tar -////////// -release-state can be: released | prerelease | unreleased -////////// - -:release-state: unreleased - :issue: https://github.com/elastic/elasticsearch/issues/ :ml-issue: https://github.com/elastic/ml-cpp/issues/ :pull: https://github.com/elastic/elasticsearch/pull/ diff --git a/docs/java-api/docs/bulk.asciidoc b/docs/java-api/docs/bulk.asciidoc index 1c2882d9c07..9976ba52544 100644 --- a/docs/java-api/docs/bulk.asciidoc +++ b/docs/java-api/docs/bulk.asciidoc @@ -130,9 +130,9 @@ or bulkProcessor.close(); -------------------------------------------------- -Both methods flush any remaining documents and disable all other scheduled flushes if they were scheduled by setting -`flushInterval`. If concurrent requests were enabled the `awaitClose` method waits for up to the specified timeout for -all bulk requests to complete then returns `true`, if the specified waiting time elapses before all bulk requests complete, +Both methods flush any remaining documents and disable all other scheduled flushes, if they were scheduled by setting +`flushInterval`. If concurrent requests were enabled, the `awaitClose` method waits for up to the specified timeout for +all bulk requests to complete then returns `true`; if the specified waiting time elapses before all bulk requests complete, `false` is returned. The `close` method doesn't wait for any remaining bulk requests to complete and exits immediately. 
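To make the `awaitClose`/`close` distinction concrete, a small sketch assuming an already configured `BulkProcessor` instance; the timeout value and helper name are illustrative:

["source","java"]
--------------------------------------------------
import org.elasticsearch.action.bulk.BulkProcessor;

import java.util.concurrent.TimeUnit;

class BulkProcessorShutdownSketch {
    static void shutDown(BulkProcessor bulkProcessor) throws InterruptedException {
        // flushes pending documents, disables scheduled flushes and waits for in-flight bulk requests
        boolean terminated = bulkProcessor.awaitClose(30L, TimeUnit.SECONDS);
        if (terminated) {
            // every bulk request completed within the timeout
        } else {
            // the timeout elapsed while bulk requests were still executing
        }
        // alternatively, close() flushes and returns immediately without waiting:
        // bulkProcessor.close();
    }
}
--------------------------------------------------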
[[java-docs-bulk-processor-tests]] diff --git a/docs/java-rest/high-level/ccr/delete_auto_follow_pattern.asciidoc b/docs/java-rest/high-level/ccr/delete_auto_follow_pattern.asciidoc index f79dbd5d39d..49aee815b89 100644 --- a/docs/java-rest/high-level/ccr/delete_auto_follow_pattern.asciidoc +++ b/docs/java-rest/high-level/ccr/delete_auto_follow_pattern.asciidoc @@ -3,7 +3,7 @@ :request: DeleteAutoFollowPatternRequest :response: AcknowledgedResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Delete Auto Follow Pattern API diff --git a/docs/java-rest/high-level/ccr/forget_follower.asciidoc b/docs/java-rest/high-level/ccr/forget_follower.asciidoc index bf1fde014b8..b889993a4e9 100644 --- a/docs/java-rest/high-level/ccr/forget_follower.asciidoc +++ b/docs/java-rest/high-level/ccr/forget_follower.asciidoc @@ -3,7 +3,7 @@ :request: ForgetFollowerRequest :response: BroadcastResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Forget Follower API diff --git a/docs/java-rest/high-level/ccr/get_auto_follow_pattern.asciidoc b/docs/java-rest/high-level/ccr/get_auto_follow_pattern.asciidoc index 61ab8d58e9c..98c9e541019 100644 --- a/docs/java-rest/high-level/ccr/get_auto_follow_pattern.asciidoc +++ b/docs/java-rest/high-level/ccr/get_auto_follow_pattern.asciidoc @@ -3,7 +3,7 @@ :request: GetAutoFollowPatternRequest :response: GetAutoFollowPatternResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Get Auto Follow Pattern API diff --git a/docs/java-rest/high-level/ccr/get_follow_info.asciidoc b/docs/java-rest/high-level/ccr/get_follow_info.asciidoc index 3d4e8d5456e..70a71c1c90b 100644 --- a/docs/java-rest/high-level/ccr/get_follow_info.asciidoc +++ b/docs/java-rest/high-level/ccr/get_follow_info.asciidoc @@ -3,7 +3,7 @@ :request: FollowInfoRequest :response: FollowInfoResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Get Follow Info API diff --git a/docs/java-rest/high-level/ccr/get_follow_stats.asciidoc b/docs/java-rest/high-level/ccr/get_follow_stats.asciidoc index 15b98abc686..a510a53b70c 100644 --- a/docs/java-rest/high-level/ccr/get_follow_stats.asciidoc +++ b/docs/java-rest/high-level/ccr/get_follow_stats.asciidoc @@ -3,7 +3,7 @@ :request: FollowStatsRequest :response: FollowStatsResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Get Follow Stats API diff --git a/docs/java-rest/high-level/ccr/get_stats.asciidoc b/docs/java-rest/high-level/ccr/get_stats.asciidoc index 28c9e107a09..6c8502302fc 100644 --- a/docs/java-rest/high-level/ccr/get_stats.asciidoc +++ b/docs/java-rest/high-level/ccr/get_stats.asciidoc @@ -3,7 +3,7 @@ :request: CcrStatsRequest :response: CcrStatsResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Get CCR Stats API diff --git a/docs/java-rest/high-level/ccr/pause_follow.asciidoc b/docs/java-rest/high-level/ccr/pause_follow.asciidoc index de81afa1e83..70694da0e81 100644 --- a/docs/java-rest/high-level/ccr/pause_follow.asciidoc +++ b/docs/java-rest/high-level/ccr/pause_follow.asciidoc @@ -3,7 +3,7 @@ :request: PauseFollowRequest :response: AcknowledgedResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Pause Follow API diff --git a/docs/java-rest/high-level/ccr/put_auto_follow_pattern.asciidoc b/docs/java-rest/high-level/ccr/put_auto_follow_pattern.asciidoc index e6cc6b89ee8..7ee9ccbe9d6 100644 --- a/docs/java-rest/high-level/ccr/put_auto_follow_pattern.asciidoc +++ b/docs/java-rest/high-level/ccr/put_auto_follow_pattern.asciidoc @@ -3,7 +3,7 @@ :request: PutAutoFollowPatternRequest :response: AcknowledgedResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Put 
Auto Follow Pattern API diff --git a/docs/java-rest/high-level/ccr/put_follow.asciidoc b/docs/java-rest/high-level/ccr/put_follow.asciidoc index 2f40bbd5d2b..c1991dcf492 100644 --- a/docs/java-rest/high-level/ccr/put_follow.asciidoc +++ b/docs/java-rest/high-level/ccr/put_follow.asciidoc @@ -3,7 +3,7 @@ :request: PutFollowRequest :response: PutFollowResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Put Follow API diff --git a/docs/java-rest/high-level/ccr/resume_follow.asciidoc b/docs/java-rest/high-level/ccr/resume_follow.asciidoc index 18d69b69d49..e30f83115fa 100644 --- a/docs/java-rest/high-level/ccr/resume_follow.asciidoc +++ b/docs/java-rest/high-level/ccr/resume_follow.asciidoc @@ -3,7 +3,7 @@ :request: ResumeFollowRequest :response: AcknowledgedResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Resume Follow API diff --git a/docs/java-rest/high-level/ccr/unfollow.asciidoc b/docs/java-rest/high-level/ccr/unfollow.asciidoc index 779b8c3f586..946a2c6e618 100644 --- a/docs/java-rest/high-level/ccr/unfollow.asciidoc +++ b/docs/java-rest/high-level/ccr/unfollow.asciidoc @@ -3,7 +3,7 @@ :request: UnfollowRequest :response: AcknowledgedResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Unfollow API diff --git a/docs/java-rest/high-level/getting-started.asciidoc b/docs/java-rest/high-level/getting-started.asciidoc index 89912cc2a45..0f3b66e6674 100644 --- a/docs/java-rest/high-level/getting-started.asciidoc +++ b/docs/java-rest/high-level/getting-started.asciidoc @@ -154,3 +154,23 @@ executes the request. For example, this is the place where you'd specify a `NodeSelector` to control which node receives the request. See the <> for more examples of customizing the options. +=== Asynchronous usage + +All of the methods across the different clients exist in a traditional synchronous and +asynchronous variant. The difference is that the asynchronous ones use asynchronous requests +in the REST Low Level Client. This is useful if you are doing multiple requests or are using e.g. +RxJava, Kotlin coroutines, or similar frameworks. + +The asynchronous methods are recognizable by the fact that they have the word "Async" in their name +and return a `Cancellable` instance. The asynchronous methods accept the same request object +as the synchronous variant and accept a generic `ActionListener<T>` where `T` is the return +type of the synchronous method. + +All asynchronous methods return a `Cancellable` object with a `cancel` method that you may call +in case you want to abort the request. Cancelling +no longer needed requests is a good way to avoid putting unnecessary +load on Elasticsearch. + +Using the `Cancellable` instance is optional and you can safely ignore it if you have +no need for it. A typical use case is combining it with e.g. Kotlin's `suspendCancellableCoroutine`.
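A sketch of the asynchronous pattern described above, using the search API as a representative example and assuming a `RestHighLevelClient` named `client`; the index name is illustrative:

["source","java"]
--------------------------------------------------
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Cancellable;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;

class AsyncUsageSketch {
    static Cancellable asyncSearch(RestHighLevelClient client) {
        // the async variant takes the same request object as the synchronous search method
        SearchRequest request = new SearchRequest("posts");
        Cancellable cancellable = client.searchAsync(request, RequestOptions.DEFAULT,
            new ActionListener<SearchResponse>() {
                @Override
                public void onResponse(SearchResponse response) {
                    // T of the ActionListener matches the return type of the synchronous variant
                }

                @Override
                public void onFailure(Exception e) {
                    // failures, including cancellation, are reported here
                }
            });
        // the caller may invoke cancellable.cancel() if the result is no longer needed
        return cancellable;
    }
}
--------------------------------------------------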
+ diff --git a/docs/java-rest/high-level/graph/explore.asciidoc b/docs/java-rest/high-level/graph/explore.asciidoc index f2718209f4b..a178dfbc3a4 100644 --- a/docs/java-rest/high-level/graph/explore.asciidoc +++ b/docs/java-rest/high-level/graph/explore.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[java-rest-high-x-pack-graph-explore]] === X-Pack Graph explore API diff --git a/docs/java-rest/high-level/ilm/delete_lifecycle_policy.asciidoc b/docs/java-rest/high-level/ilm/delete_lifecycle_policy.asciidoc index e6f100294ae..a68a2d9de5b 100644 --- a/docs/java-rest/high-level/ilm/delete_lifecycle_policy.asciidoc +++ b/docs/java-rest/high-level/ilm/delete_lifecycle_policy.asciidoc @@ -3,7 +3,7 @@ :request: DeleteLifecyclePolicyRequest :response: AcknowledgedResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Delete Lifecycle Policy API diff --git a/docs/java-rest/high-level/ilm/delete_snapshot_lifecycle_policy.asciidoc b/docs/java-rest/high-level/ilm/delete_snapshot_lifecycle_policy.asciidoc index 66819d06187..4079ac3dc08 100644 --- a/docs/java-rest/high-level/ilm/delete_snapshot_lifecycle_policy.asciidoc +++ b/docs/java-rest/high-level/ilm/delete_snapshot_lifecycle_policy.asciidoc @@ -3,7 +3,7 @@ :request: DeleteSnapshotLifecyclePolicyRequest :response: AcknowledgedResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Delete Snapshot Lifecycle Policy API diff --git a/docs/java-rest/high-level/ilm/execute_snapshot_lifecycle_policy.asciidoc b/docs/java-rest/high-level/ilm/execute_snapshot_lifecycle_policy.asciidoc index 7b3af935a27..b2c36a4e273 100644 --- a/docs/java-rest/high-level/ilm/execute_snapshot_lifecycle_policy.asciidoc +++ b/docs/java-rest/high-level/ilm/execute_snapshot_lifecycle_policy.asciidoc @@ -3,7 +3,7 @@ :request: ExecuteSnapshotLifecyclePolicyRequest :response: ExecuteSnapshotLifecyclePolicyResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Execute Snapshot Lifecycle Policy API diff --git a/docs/java-rest/high-level/ilm/explain_lifecycle.asciidoc b/docs/java-rest/high-level/ilm/explain_lifecycle.asciidoc index 028f34793fe..b85d482299a 100644 --- a/docs/java-rest/high-level/ilm/explain_lifecycle.asciidoc +++ b/docs/java-rest/high-level/ilm/explain_lifecycle.asciidoc @@ -3,7 +3,7 @@ :request: ExplainLifecycleRequest :response: ExplainLifecycleResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Explain Lifecycle API diff --git a/docs/java-rest/high-level/ilm/get_lifecycle_policy.asciidoc b/docs/java-rest/high-level/ilm/get_lifecycle_policy.asciidoc index b86fad5880f..506c2c736e5 100644 --- a/docs/java-rest/high-level/ilm/get_lifecycle_policy.asciidoc +++ b/docs/java-rest/high-level/ilm/get_lifecycle_policy.asciidoc @@ -3,7 +3,7 @@ :request: GetLifecyclePolicyRequest :response: GetLifecyclePolicyResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Get Lifecycle Policy API diff --git a/docs/java-rest/high-level/ilm/get_snapshot_lifecycle_policy.asciidoc b/docs/java-rest/high-level/ilm/get_snapshot_lifecycle_policy.asciidoc index eaa8af7969e..da51760961c 100644 --- a/docs/java-rest/high-level/ilm/get_snapshot_lifecycle_policy.asciidoc +++ b/docs/java-rest/high-level/ilm/get_snapshot_lifecycle_policy.asciidoc @@ -3,7 +3,7 @@ :request: GetSnapshotLifecyclePolicyRequest :response: GetSnapshotLifecyclePolicyResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Get Snapshot Lifecycle Policy API diff --git a/docs/java-rest/high-level/ilm/get_snapshot_lifecycle_stats.asciidoc b/docs/java-rest/high-level/ilm/get_snapshot_lifecycle_stats.asciidoc new file mode 100644 index 
00000000000..c9ff0a9880c --- /dev/null +++ b/docs/java-rest/high-level/ilm/get_snapshot_lifecycle_stats.asciidoc @@ -0,0 +1,35 @@ +-- +:api: slm-get-snapshot-lifecycle-stats +:request: GetSnapshotLifecycleStatsRequest +:response: GetSnapshotLifecycleStatsResponse +-- +[role="xpack"] +[id="{upid}-{api}"] +=== Get Snapshot Lifecycle Stats API + + +[id="{upid}-{api}-request"] +==== Request + +The Get Snapshot Lifecycle Stats API allows you to retrieve statistics about snapshots taken or +deleted, as well as retention runs by the snapshot lifecycle service. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-request] +-------------------------------------------------- + +[id="{upid}-{api}-response"] +==== Response + +The returned +{response}+ contains global statistics as well as a map of `SnapshotPolicyStats`, +accessible by the id of the policy, which contains statistics about each policy. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-response] +-------------------------------------------------- + +include::../execution.asciidoc[] + + diff --git a/docs/java-rest/high-level/ilm/lifecycle_management_status.asciidoc b/docs/java-rest/high-level/ilm/lifecycle_management_status.asciidoc index 713c5480cae..6bf4344477e 100644 --- a/docs/java-rest/high-level/ilm/lifecycle_management_status.asciidoc +++ b/docs/java-rest/high-level/ilm/lifecycle_management_status.asciidoc @@ -3,7 +3,7 @@ :request: LifecycleManagementStatusRequest :response: AcknowledgedResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Index Lifecycle Management Status API diff --git a/docs/java-rest/high-level/ilm/put_lifecycle_policy.asciidoc b/docs/java-rest/high-level/ilm/put_lifecycle_policy.asciidoc index 75103fa5bdf..7947f54ffbc 100644 --- a/docs/java-rest/high-level/ilm/put_lifecycle_policy.asciidoc +++ b/docs/java-rest/high-level/ilm/put_lifecycle_policy.asciidoc @@ -3,7 +3,7 @@ :request: PutLifecyclePolicyRequest :response: AcknowledgedResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Put Lifecycle Policy API diff --git a/docs/java-rest/high-level/ilm/put_snapshot_lifecycle_policy.asciidoc b/docs/java-rest/high-level/ilm/put_snapshot_lifecycle_policy.asciidoc index 7fe7fec26c3..13a0bb6e782 100644 --- a/docs/java-rest/high-level/ilm/put_snapshot_lifecycle_policy.asciidoc +++ b/docs/java-rest/high-level/ilm/put_snapshot_lifecycle_policy.asciidoc @@ -3,7 +3,7 @@ :request: PutSnapshotLifecyclePolicyRequest :response: AcknowledgedResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Put Snapshot Lifecycle Policy API diff --git a/docs/java-rest/high-level/ilm/remove_lifecycle_policy_from_index.asciidoc b/docs/java-rest/high-level/ilm/remove_lifecycle_policy_from_index.asciidoc index 96c6b0be1b1..4b12e89d6aa 100644 --- a/docs/java-rest/high-level/ilm/remove_lifecycle_policy_from_index.asciidoc +++ b/docs/java-rest/high-level/ilm/remove_lifecycle_policy_from_index.asciidoc @@ -3,7 +3,7 @@ :request: RemoveIndexLifecyclePolicyRequest :response: AcknowledgedResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Remove Policy from Index API diff --git a/docs/java-rest/high-level/ilm/retry_lifecycle_policy.asciidoc b/docs/java-rest/high-level/ilm/retry_lifecycle_policy.asciidoc index 89dd4ea1cfa..2798b1fecfd 100644 --- a/docs/java-rest/high-level/ilm/retry_lifecycle_policy.asciidoc +++ 
b/docs/java-rest/high-level/ilm/retry_lifecycle_policy.asciidoc @@ -3,7 +3,7 @@ :request: RetryLifecyclePolicyRequest :response: AcknowledgedResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Retry Lifecycle Policy API diff --git a/docs/java-rest/high-level/ilm/start_lifecycle_management.asciidoc b/docs/java-rest/high-level/ilm/start_lifecycle_management.asciidoc index d65e7dd5009..20a77259663 100644 --- a/docs/java-rest/high-level/ilm/start_lifecycle_management.asciidoc +++ b/docs/java-rest/high-level/ilm/start_lifecycle_management.asciidoc @@ -3,7 +3,7 @@ :request: StartILMRequest :response: AcknowledgedResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Start Index Lifecycle Management API diff --git a/docs/java-rest/high-level/ilm/stop_lifecycle_management.asciidoc b/docs/java-rest/high-level/ilm/stop_lifecycle_management.asciidoc index 85117fe311a..04c30e1012f 100644 --- a/docs/java-rest/high-level/ilm/stop_lifecycle_management.asciidoc +++ b/docs/java-rest/high-level/ilm/stop_lifecycle_management.asciidoc @@ -3,7 +3,7 @@ :request: StopILMRequest :response: AcknowledgedResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Stop Index Lifecycle Management API diff --git a/docs/java-rest/high-level/ml/close-job.asciidoc b/docs/java-rest/high-level/ml/close-job.asciidoc index bf14fa0f21a..95a73356d34 100644 --- a/docs/java-rest/high-level/ml/close-job.asciidoc +++ b/docs/java-rest/high-level/ml/close-job.asciidoc @@ -3,6 +3,7 @@ :request: CloseJobRequest :response: CloseJobResponse -- +[role="xpack"] [id="{upid}-{api}"] === Close {anomaly-job} API diff --git a/docs/java-rest/high-level/ml/delete-calendar-event.asciidoc b/docs/java-rest/high-level/ml/delete-calendar-event.asciidoc index ffad85dd45b..a8c9641110b 100644 --- a/docs/java-rest/high-level/ml/delete-calendar-event.asciidoc +++ b/docs/java-rest/high-level/ml/delete-calendar-event.asciidoc @@ -3,6 +3,7 @@ :request: DeleteCalendarEventRequest :response: AcknowledgedResponse -- +[role="xpack"] [id="{upid}-{api}"] === Delete calendar event API Removes a scheduled event from an existing {ml} calendar. diff --git a/docs/java-rest/high-level/ml/delete-calendar-job.asciidoc b/docs/java-rest/high-level/ml/delete-calendar-job.asciidoc index 5e4463b97e5..6add6f7da66 100644 --- a/docs/java-rest/high-level/ml/delete-calendar-job.asciidoc +++ b/docs/java-rest/high-level/ml/delete-calendar-job.asciidoc @@ -3,6 +3,7 @@ :request: DeleteCalendarJobRequest :response: PutCalendarResponse -- +[role="xpack"] [id="{upid}-{api}"] === Delete {anomaly-jobs} from calendar API Removes {anomaly-jobs} from an existing {ml} calendar. diff --git a/docs/java-rest/high-level/ml/delete-calendar.asciidoc b/docs/java-rest/high-level/ml/delete-calendar.asciidoc index e39a1997d52..fa0bd126169 100644 --- a/docs/java-rest/high-level/ml/delete-calendar.asciidoc +++ b/docs/java-rest/high-level/ml/delete-calendar.asciidoc @@ -3,6 +3,7 @@ :request: DeleteCalendarRequest :response: AcknowledgedResponse -- +[role="xpack"] [id="{upid}-{api}"] === Delete calendar API Delete a {ml} calendar. 
diff --git a/docs/java-rest/high-level/ml/delete-data-frame-analytics.asciidoc b/docs/java-rest/high-level/ml/delete-data-frame-analytics.asciidoc index 2e5ade37107..cb321d95c38 100644 --- a/docs/java-rest/high-level/ml/delete-data-frame-analytics.asciidoc +++ b/docs/java-rest/high-level/ml/delete-data-frame-analytics.asciidoc @@ -3,26 +3,27 @@ :request: DeleteDataFrameAnalyticsRequest :response: AcknowledgedResponse -- +[role="xpack"] [id="{upid}-{api}"] -=== Delete Data Frame Analytics API +=== Delete {dfanalytics-jobs} API -The Delete Data Frame Analytics API is used to delete an existing {dataframe-analytics-config}. +Delete an existing {dfanalytics-job}. The API accepts a +{request}+ object as a request and returns a +{response}+. [id="{upid}-{api}-request"] -==== Delete Data Frame Analytics Request +==== Delete {dfanalytics-jobs} request -A +{request}+ object requires a {dataframe-analytics-config} id. +A +{request}+ object requires a {dfanalytics-job} ID. ["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- include-tagged::{doc-tests-file}[{api}-request] --------------------------------------------------- -<1> Constructing a new request referencing an existing {dataframe-analytics-config} +<1> Constructing a new request referencing an existing {dfanalytics-job}. include::../execution.asciidoc[] [id="{upid}-{api}-response"] ==== Response -The returned +{response}+ object acknowledges the {dataframe-analytics-config} deletion. +The returned +{response}+ object acknowledges the {dfanalytics-job} deletion. diff --git a/docs/java-rest/high-level/ml/delete-datafeed.asciidoc b/docs/java-rest/high-level/ml/delete-datafeed.asciidoc index ddc8c352729..cbb314dbf56 100644 --- a/docs/java-rest/high-level/ml/delete-datafeed.asciidoc +++ b/docs/java-rest/high-level/ml/delete-datafeed.asciidoc @@ -3,6 +3,7 @@ :request: DeleteDatafeedRequest :response: AcknowledgedResponse -- +[role="xpack"] [id="{upid}-delete-datafeed"] === Delete datafeed API diff --git a/docs/java-rest/high-level/ml/delete-expired-data.asciidoc b/docs/java-rest/high-level/ml/delete-expired-data.asciidoc index 03bd013b2ab..8dc47750cbe 100644 --- a/docs/java-rest/high-level/ml/delete-expired-data.asciidoc +++ b/docs/java-rest/high-level/ml/delete-expired-data.asciidoc @@ -4,6 +4,7 @@ :request: DeleteExpiredRequest :response: DeleteExpiredResponse -- +[role="xpack"] [id="{upid}-{api}"] === Delete Expired Data API Delete expired {ml} data. diff --git a/docs/java-rest/high-level/ml/delete-filter.asciidoc b/docs/java-rest/high-level/ml/delete-filter.asciidoc index abdcdcb5392..7c68414d67c 100644 --- a/docs/java-rest/high-level/ml/delete-filter.asciidoc +++ b/docs/java-rest/high-level/ml/delete-filter.asciidoc @@ -3,6 +3,7 @@ :request: DeleteFilterRequest :response: AcknowledgedResponse -- +[role="xpack"] [id="{upid}-{api}"] === Delete Filter API Delete a {ml} filter. 
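The delete pages in the hunks above all follow the same shape: a request built from an ID and an acknowledged response. A minimal sketch for the delete {dfanalytics-jobs} case, assuming an existing `RestHighLevelClient client` and a hypothetical job ID `"my-analytics"`; the `deleteDataFrameAnalytics` method name is inferred from the request and response classes named in the page header:

["source","java"]
--------------------------------------------------
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.core.AcknowledgedResponse;
import org.elasticsearch.client.ml.DeleteDataFrameAnalyticsRequest;

// The request only needs the ID of an existing data frame analytics job.
DeleteDataFrameAnalyticsRequest request =
    new DeleteDataFrameAnalyticsRequest("my-analytics");

// The response simply acknowledges the deletion.
AcknowledgedResponse response =
    client.machineLearning().deleteDataFrameAnalytics(request, RequestOptions.DEFAULT);
boolean deleted = response.isAcknowledged();
--------------------------------------------------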
diff --git a/docs/java-rest/high-level/ml/delete-forecast.asciidoc b/docs/java-rest/high-level/ml/delete-forecast.asciidoc index 961254b4815..2c654c9a230 100644 --- a/docs/java-rest/high-level/ml/delete-forecast.asciidoc +++ b/docs/java-rest/high-level/ml/delete-forecast.asciidoc @@ -3,6 +3,7 @@ :request: DeleteForecastRequest :response: AcknowledgedResponse -- +[role="xpack"] [id="{upid}-{api}"] === Delete Forecast API diff --git a/docs/java-rest/high-level/ml/delete-job.asciidoc b/docs/java-rest/high-level/ml/delete-job.asciidoc index 300b3edef68..c32483dd1fc 100644 --- a/docs/java-rest/high-level/ml/delete-job.asciidoc +++ b/docs/java-rest/high-level/ml/delete-job.asciidoc @@ -3,6 +3,7 @@ :request: DeleteJobRequest :response: AcknowledgedResponse -- +[role="xpack"] [id="{upid}-{api}"] === Delete {anomaly-job} API diff --git a/docs/java-rest/high-level/ml/delete-model-snapshot.asciidoc b/docs/java-rest/high-level/ml/delete-model-snapshot.asciidoc index 6ede01901da..b9a4f5f3736 100644 --- a/docs/java-rest/high-level/ml/delete-model-snapshot.asciidoc +++ b/docs/java-rest/high-level/ml/delete-model-snapshot.asciidoc @@ -3,6 +3,7 @@ :request: DeleteModelSnapshotRequest :response: AcknowledgedResponse -- +[role="xpack"] [id="{upid}-{api}"] === Delete Model Snapshot API diff --git a/docs/java-rest/high-level/ml/estimate-memory-usage.asciidoc b/docs/java-rest/high-level/ml/estimate-memory-usage.asciidoc index 659e7e11755..8b7ae0f55c8 100644 --- a/docs/java-rest/high-level/ml/estimate-memory-usage.asciidoc +++ b/docs/java-rest/high-level/ml/estimate-memory-usage.asciidoc @@ -3,16 +3,17 @@ :request: PutDataFrameAnalyticsRequest :response: EstimateMemoryUsageResponse -- +[role="xpack"] [id="{upid}-{api}"] === Estimate memory usage API -The Estimate memory usage API is used to estimate memory usage of {dfanalytics}. +Estimates memory usage of {dfanalytics}. Estimation results can be used when deciding the appropriate value for `model_memory_limit` setting later on. The API accepts an +{request}+ object and returns an +{response}+. [id="{upid}-{api}-request"] -==== Estimate memory usage Request +==== Estimate memory usage request ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- diff --git a/docs/java-rest/high-level/ml/evaluate-data-frame.asciidoc b/docs/java-rest/high-level/ml/evaluate-data-frame.asciidoc index 1fe4cc7af01..7c231d7103b 100644 --- a/docs/java-rest/high-level/ml/evaluate-data-frame.asciidoc +++ b/docs/java-rest/high-level/ml/evaluate-data-frame.asciidoc @@ -3,14 +3,15 @@ :request: EvaluateDataFrameRequest :response: EvaluateDataFrameResponse -- +[role="xpack"] [id="{upid}-{api}"] -=== Evaluate Data Frame API +=== Evaluate {dfanalytics} API -The Evaluate Data Frame API is used to evaluate an ML algorithm that ran on a {dataframe}. +Evaluates the {ml} algorithm that ran on a {dataframe}. The API accepts an +{request}+ object and returns an +{response}+. 
[id="{upid}-{api}-request"] -==== Evaluate Data Frame Request +==== Evaluate {dfanalytics} request ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- diff --git a/docs/java-rest/high-level/ml/find-file-structure.asciidoc b/docs/java-rest/high-level/ml/find-file-structure.asciidoc index 5882fc0fce2..bb0fbea91b3 100644 --- a/docs/java-rest/high-level/ml/find-file-structure.asciidoc +++ b/docs/java-rest/high-level/ml/find-file-structure.asciidoc @@ -3,6 +3,7 @@ :request: FindFileStructureRequest :response: FindFileStructureResponse -- +[role="xpack"] [id="{upid}-{api}"] === Find File Structure API diff --git a/docs/java-rest/high-level/ml/flush-job.asciidoc b/docs/java-rest/high-level/ml/flush-job.asciidoc index e721d48d4d1..2300377801e 100644 --- a/docs/java-rest/high-level/ml/flush-job.asciidoc +++ b/docs/java-rest/high-level/ml/flush-job.asciidoc @@ -3,6 +3,7 @@ :request: FlushJobRequest :response: FlushJobResponse -- +[role="xpack"] [id="{upid}-{api}"] === Flush Job API diff --git a/docs/java-rest/high-level/ml/forecast-job.asciidoc b/docs/java-rest/high-level/ml/forecast-job.asciidoc index 48d899d6814..d9a1b615cac 100644 --- a/docs/java-rest/high-level/ml/forecast-job.asciidoc +++ b/docs/java-rest/high-level/ml/forecast-job.asciidoc @@ -3,6 +3,7 @@ :request: ForecastJobRequest :response: ForecastJobResponse -- +[role="xpack"] [id="{upid}-{api}"] === Forecast Job API diff --git a/docs/java-rest/high-level/ml/get-buckets.asciidoc b/docs/java-rest/high-level/ml/get-buckets.asciidoc index c679ce405d6..14c9406969e 100644 --- a/docs/java-rest/high-level/ml/get-buckets.asciidoc +++ b/docs/java-rest/high-level/ml/get-buckets.asciidoc @@ -3,6 +3,7 @@ :request: GetBucketsRequest :response: GetBucketsResponse -- +[role="xpack"] [id="{upid}-{api}"] === Get buckets API diff --git a/docs/java-rest/high-level/ml/get-calendar-events.asciidoc b/docs/java-rest/high-level/ml/get-calendar-events.asciidoc index a645d64cb13..e7fd5c47e17 100644 --- a/docs/java-rest/high-level/ml/get-calendar-events.asciidoc +++ b/docs/java-rest/high-level/ml/get-calendar-events.asciidoc @@ -3,6 +3,7 @@ :request: GetCalendarEventsRequest :response: GetCalendarEventsResponse -- +[role="xpack"] [id="{upid}-{api}"] === Get calendar events API Retrieves a calendar's events. diff --git a/docs/java-rest/high-level/ml/get-calendars.asciidoc b/docs/java-rest/high-level/ml/get-calendars.asciidoc index a4c2ae3fe34..5eed1333279 100644 --- a/docs/java-rest/high-level/ml/get-calendars.asciidoc +++ b/docs/java-rest/high-level/ml/get-calendars.asciidoc @@ -3,6 +3,7 @@ :request: GetCalendarsRequest :response: GetCalendarsResponse -- +[role="xpack"] [id="{upid}-{api}"] === Get calendars API Retrieves one or more calendar objects. 
diff --git a/docs/java-rest/high-level/ml/get-categories.asciidoc b/docs/java-rest/high-level/ml/get-categories.asciidoc index 425a2ae2f63..bcb5ed89253 100644 --- a/docs/java-rest/high-level/ml/get-categories.asciidoc +++ b/docs/java-rest/high-level/ml/get-categories.asciidoc @@ -3,6 +3,7 @@ :request: GetCategoriesRequest :response: GetCategoriesResponse -- +[role="xpack"] [id="{upid}-{api}"] === Get categories API diff --git a/docs/java-rest/high-level/ml/get-data-frame-analytics-stats.asciidoc b/docs/java-rest/high-level/ml/get-data-frame-analytics-stats.asciidoc index e1047e9b3e0..09023e2c7b7 100644 --- a/docs/java-rest/high-level/ml/get-data-frame-analytics-stats.asciidoc +++ b/docs/java-rest/high-level/ml/get-data-frame-analytics-stats.asciidoc @@ -3,30 +3,33 @@ :request: GetDataFrameAnalyticsStatsRequest :response: GetDataFrameAnalyticsStatsResponse -- +[role="xpack"] [id="{upid}-{api}"] -=== Get Data Frame Analytics Stats API +=== Get {dfanalytics-jobs} stats API -The Get Data Frame Analytics Stats API is used to read the operational statistics of one or more {dataframe-analytics-config}s. +Retrieves the operational statistics of one or more {dfanalytics-jobs}. The API accepts a +{request}+ object and returns a +{response}+. [id="{upid}-{api}-request"] -==== Get Data Frame Analytics Stats Request +==== Get {dfanalytics-jobs} stats request -A +{request}+ requires either a {dataframe-analytics-config} id, a comma separated list of ids or -the special wildcard `_all` to get the statistics for all {dataframe-analytics-config}s +A +{request}+ requires either a {dfanalytics-job} ID, a comma-separated list of +IDs, or the special wildcard `_all` to get the statistics for all +{dfanalytics-jobs}. ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- include-tagged::{doc-tests-file}[{api}-request] -------------------------------------------------- -<1> Constructing a new GET Stats request referencing an existing {dataframe-analytics-config} +<1> Constructing a new GET stats request referencing an existing +{dfanalytics-job} include::../execution.asciidoc[] [id="{upid}-{api}-response"] ==== Response -The returned +{response}+ contains the requested {dataframe-analytics-config} statistics. +The returned +{response}+ contains the requested {dfanalytics-job} statistics. ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- diff --git a/docs/java-rest/high-level/ml/get-data-frame-analytics.asciidoc b/docs/java-rest/high-level/ml/get-data-frame-analytics.asciidoc index c6d368efbca..2e956d43074 100644 --- a/docs/java-rest/high-level/ml/get-data-frame-analytics.asciidoc +++ b/docs/java-rest/high-level/ml/get-data-frame-analytics.asciidoc @@ -3,30 +3,31 @@ :request: GetDataFrameAnalyticsRequest :response: GetDataFrameAnalyticsResponse -- +[role="xpack"] [id="{upid}-{api}"] -=== Get Data Frame Analytics API +=== Get {dfanalytics-jobs} API -The Get Data Frame Analytics API is used to get one or more {dataframe-analytics-config}s. +Retrieves one or more {dfanalytics-jobs}. The API accepts a +{request}+ object and returns a +{response}+. [id="{upid}-{api}-request"] -==== Get Data Frame Analytics Request +==== Get {dfanalytics-jobs} request -A +{request}+ requires either a {dataframe-analytics-config} id, a comma separated list of ids or -the special wildcard `_all` to get all {dataframe-analytics-config}s. 
+A +{request}+ requires either a {dfanalytics-job} ID, a comma-separated list of +IDs, or the special wildcard `_all` to get all {dfanalytics-jobs}. ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- include-tagged::{doc-tests-file}[{api}-request] -------------------------------------------------- -<1> Constructing a new GET request referencing an existing {dataframe-analytics-config} +<1> Constructing a new GET request referencing an existing {dfanalytics-job} include::../execution.asciidoc[] [id="{upid}-{api}-response"] ==== Response -The returned +{response}+ contains the requested {dataframe-analytics-config}s. +The returned +{response}+ contains the requested {dfanalytics-jobs}. ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- diff --git a/docs/java-rest/high-level/ml/get-datafeed-stats.asciidoc b/docs/java-rest/high-level/ml/get-datafeed-stats.asciidoc index 08e9498c35e..16055098162 100644 --- a/docs/java-rest/high-level/ml/get-datafeed-stats.asciidoc +++ b/docs/java-rest/high-level/ml/get-datafeed-stats.asciidoc @@ -3,6 +3,7 @@ :request: GetDatafeedStatsRequest :response: GetDatafeedStatsResponse -- +[role="xpack"] [id="{upid}-{api}"] === Get datafeed stats API diff --git a/docs/java-rest/high-level/ml/get-datafeed.asciidoc b/docs/java-rest/high-level/ml/get-datafeed.asciidoc index 977e821d9be..0aa5ec2ec0d 100644 --- a/docs/java-rest/high-level/ml/get-datafeed.asciidoc +++ b/docs/java-rest/high-level/ml/get-datafeed.asciidoc @@ -3,6 +3,7 @@ :request: GetDatafeedRequest :response: GetDatafeedResponse -- +[role="xpack"] [id="{upid}-{api}"] === Get datafeed API diff --git a/docs/java-rest/high-level/ml/get-filters.asciidoc b/docs/java-rest/high-level/ml/get-filters.asciidoc index a7b723eb3c1..5d33e1e2d19 100644 --- a/docs/java-rest/high-level/ml/get-filters.asciidoc +++ b/docs/java-rest/high-level/ml/get-filters.asciidoc @@ -3,6 +3,7 @@ :request: GetFiltersRequest :response: GetFiltersResponse -- +[role="xpack"] [id="{upid}-{api}"] === Get filters API diff --git a/docs/java-rest/high-level/ml/get-influencers.asciidoc b/docs/java-rest/high-level/ml/get-influencers.asciidoc index a428e106a6e..9096a103911 100644 --- a/docs/java-rest/high-level/ml/get-influencers.asciidoc +++ b/docs/java-rest/high-level/ml/get-influencers.asciidoc @@ -3,6 +3,7 @@ :request: GetInfluencersRequest :response: GetInfluencersResponse -- +[role="xpack"] [id="{upid}-{api}"] === Get influencers API diff --git a/docs/java-rest/high-level/ml/get-info.asciidoc b/docs/java-rest/high-level/ml/get-info.asciidoc index 5c78a39116b..9d7bdbf0923 100644 --- a/docs/java-rest/high-level/ml/get-info.asciidoc +++ b/docs/java-rest/high-level/ml/get-info.asciidoc @@ -3,6 +3,7 @@ :request: MlInfoRequest :response: MlInfoResponse -- +[role="xpack"] [id="{upid}-{api}"] === ML get info API diff --git a/docs/java-rest/high-level/ml/get-job-stats.asciidoc b/docs/java-rest/high-level/ml/get-job-stats.asciidoc index 96f242ab895..c2fd8cfc2c7 100644 --- a/docs/java-rest/high-level/ml/get-job-stats.asciidoc +++ b/docs/java-rest/high-level/ml/get-job-stats.asciidoc @@ -3,6 +3,7 @@ :request: GetJobStatsRequest :response: GetJobStatsResponse -- +[role="xpack"] [id="{upid}-{api}"] === Get {anomaly-job} stats API diff --git a/docs/java-rest/high-level/ml/get-job.asciidoc b/docs/java-rest/high-level/ml/get-job.asciidoc index a958e175899..3fde9b98f31 100644 --- a/docs/java-rest/high-level/ml/get-job.asciidoc +++ 
b/docs/java-rest/high-level/ml/get-job.asciidoc @@ -3,6 +3,7 @@ :request: GetJobRequest :response: GetJobResponse -- +[role="xpack"] [id="{upid}-{api}"] === Get {anomaly-jobs} API diff --git a/docs/java-rest/high-level/ml/get-model-snapshots.asciidoc b/docs/java-rest/high-level/ml/get-model-snapshots.asciidoc index 502ad42be43..d0cc7a3887f 100644 --- a/docs/java-rest/high-level/ml/get-model-snapshots.asciidoc +++ b/docs/java-rest/high-level/ml/get-model-snapshots.asciidoc @@ -3,6 +3,7 @@ :request: GetModelSnapshotsRequest :response: GetModelSnapshotsResponse -- +[role="xpack"] [id="{upid}-{api}"] === Get model snapshots API diff --git a/docs/java-rest/high-level/ml/get-overall-buckets.asciidoc b/docs/java-rest/high-level/ml/get-overall-buckets.asciidoc index 3c41e8fbe75..4fd7b806345 100644 --- a/docs/java-rest/high-level/ml/get-overall-buckets.asciidoc +++ b/docs/java-rest/high-level/ml/get-overall-buckets.asciidoc @@ -3,6 +3,7 @@ :request: GetOverallBucketsRequest :response: GetOverallBucketsResponse -- +[role="xpack"] [id="{upid}-{api}"] === Get overall buckets API diff --git a/docs/java-rest/high-level/ml/get-records.asciidoc b/docs/java-rest/high-level/ml/get-records.asciidoc index 7538bacc5fb..cd71345b2ca 100644 --- a/docs/java-rest/high-level/ml/get-records.asciidoc +++ b/docs/java-rest/high-level/ml/get-records.asciidoc @@ -3,6 +3,7 @@ :request: GetRecordsRequest :response: GetRecordsResponse -- +[role="xpack"] [id="{upid}-{api}"] === Get records API diff --git a/docs/java-rest/high-level/ml/open-job.asciidoc b/docs/java-rest/high-level/ml/open-job.asciidoc index b6f4e8ed1eb..bdc98c5035c 100644 --- a/docs/java-rest/high-level/ml/open-job.asciidoc +++ b/docs/java-rest/high-level/ml/open-job.asciidoc @@ -3,6 +3,7 @@ :request: OpenJobRequest :response: OpenJobResponse -- +[role="xpack"] [id="{upid}-{api}"] === Open {anomaly-job} API diff --git a/docs/java-rest/high-level/ml/post-calendar-event.asciidoc b/docs/java-rest/high-level/ml/post-calendar-event.asciidoc index ba7c69acf03..689df8067f8 100644 --- a/docs/java-rest/high-level/ml/post-calendar-event.asciidoc +++ b/docs/java-rest/high-level/ml/post-calendar-event.asciidoc @@ -3,6 +3,7 @@ :request: PostCalendarEventRequest :response: PostCalendarEventResponse -- +[role="xpack"] [id="{upid}-{api}"] === Post Calendar Event API Adds new ScheduledEvents to an existing {ml} calendar. 
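Most of the {anomaly-job} pages touched above only gain the xpack role, but they all share the same execution pattern, and opening a job is representative. A short sketch, assuming an existing `client` and a hypothetical job ID (error handling omitted):

["source","java"]
--------------------------------------------------
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.ml.OpenJobRequest;
import org.elasticsearch.client.ml.OpenJobResponse;

// Open an existing anomaly detection job so it can accept data.
OpenJobRequest request = new OpenJobRequest("my-anomaly-job");

OpenJobResponse response =
    client.machineLearning().openJob(request, RequestOptions.DEFAULT);
boolean opened = response.isOpened();
--------------------------------------------------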
diff --git a/docs/java-rest/high-level/ml/post-data.asciidoc b/docs/java-rest/high-level/ml/post-data.asciidoc index fd51dc80696..eff48f505b0 100644 --- a/docs/java-rest/high-level/ml/post-data.asciidoc +++ b/docs/java-rest/high-level/ml/post-data.asciidoc @@ -3,6 +3,7 @@ :request: PostDataRequest :response: PostDataResponse -- +[role="xpack"] [id="{upid}-{api}"] === Post Data API diff --git a/docs/java-rest/high-level/ml/preview-datafeed.asciidoc b/docs/java-rest/high-level/ml/preview-datafeed.asciidoc index 5b812af8344..2c5c4800147 100644 --- a/docs/java-rest/high-level/ml/preview-datafeed.asciidoc +++ b/docs/java-rest/high-level/ml/preview-datafeed.asciidoc @@ -3,6 +3,7 @@ :request: PreviewDatafeedRequest :response: PreviewDatafeedResponse -- +[role="xpack"] [id="{upid}-{api}"] === Preview Datafeed API diff --git a/docs/java-rest/high-level/ml/put-calendar-job.asciidoc b/docs/java-rest/high-level/ml/put-calendar-job.asciidoc index f178fa82c80..7e5d33244be 100644 --- a/docs/java-rest/high-level/ml/put-calendar-job.asciidoc +++ b/docs/java-rest/high-level/ml/put-calendar-job.asciidoc @@ -3,6 +3,7 @@ :request: PutCalendarJobRequest :response: PutCalendarResponse -- +[role="xpack"] [id="{upid}-{api}"] === Put {anomaly-jobs} in calendar API Adds {anomaly-jobs} jobs to an existing {ml} calendar. diff --git a/docs/java-rest/high-level/ml/put-calendar.asciidoc b/docs/java-rest/high-level/ml/put-calendar.asciidoc index be45f573bdb..caa406292d7 100644 --- a/docs/java-rest/high-level/ml/put-calendar.asciidoc +++ b/docs/java-rest/high-level/ml/put-calendar.asciidoc @@ -3,6 +3,7 @@ :request: PutCalendarRequest :response: PutCalendarResponse -- +[role="xpack"] [id="{upid}-{api}"] === Put calendar API Creates a new {ml} calendar. diff --git a/docs/java-rest/high-level/ml/put-data-frame-analytics.asciidoc b/docs/java-rest/high-level/ml/put-data-frame-analytics.asciidoc index 4520026f166..4d0fba4782f 100644 --- a/docs/java-rest/high-level/ml/put-data-frame-analytics.asciidoc +++ b/docs/java-rest/high-level/ml/put-data-frame-analytics.asciidoc @@ -3,14 +3,15 @@ :request: PutDataFrameAnalyticsRequest :response: PutDataFrameAnalyticsResponse -- +[role="xpack"] [id="{upid}-{api}"] -=== Put Data Frame Analytics API +=== Put {dfanalytics-jobs} API -The Put Data Frame Analytics API is used to create a new {dataframe-analytics-config}. +Creates a new {dfanalytics-job}. The API accepts a +{request}+ object as a request and returns a +{response}+. 
[id="{upid}-{api}-request"] -==== Put Data Frame Analytics Request +==== Put {dfanalytics-jobs} request A +{request}+ requires the following argument: @@ -18,19 +19,19 @@ A +{request}+ requires the following argument: -------------------------------------------------- include-tagged::{doc-tests-file}[{api}-request] -------------------------------------------------- -<1> The configuration of the {dataframe-job} to create +<1> The configuration of the {dfanalytics-job} to create [id="{upid}-{api}-config"] -==== Data Frame Analytics Configuration +==== {dfanalytics-cap} configuration -The `DataFrameAnalyticsConfig` object contains all the details about the {dataframe-job} +The `DataFrameAnalyticsConfig` object contains all the details about the {dfanalytics-job} configuration and contains the following arguments: ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- include-tagged::{doc-tests-file}[{api}-config] -------------------------------------------------- -<1> The {dataframe-analytics-config} id +<1> The {dfanalytics-job} ID <2> The source index and query from which to gather data <3> The destination index <4> The analysis to be performed @@ -63,7 +64,7 @@ include-tagged::{doc-tests-file}[{api}-query-config] ==== DestinationConfig -The index to which data should be written by the {dataframe-job}. +The index to which data should be written by the {dfanalytics-job}. ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- @@ -77,7 +78,7 @@ include-tagged::{doc-tests-file}[{api}-dest-config] The analysis to be performed. Currently, the supported analyses include : +OutlierDetection+, +Regression+. -===== Outlier Detection +===== Outlier detection +OutlierDetection+ analysis can be created in one of two ways: @@ -128,7 +129,7 @@ include::../execution.asciidoc[] [id="{upid}-{api}-response"] ==== Response -The returned +{response}+ contains the newly created {dataframe-analytics-config}. +The returned +{response}+ contains the newly created {dfanalytics-job}. 
["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- diff --git a/docs/java-rest/high-level/ml/put-datafeed.asciidoc b/docs/java-rest/high-level/ml/put-datafeed.asciidoc index cce72a5ee5f..0a388c7f3fc 100644 --- a/docs/java-rest/high-level/ml/put-datafeed.asciidoc +++ b/docs/java-rest/high-level/ml/put-datafeed.asciidoc @@ -3,6 +3,7 @@ :request: PutDatafeedRequest :response: PutDatafeedResponse -- +[role="xpack"] [id="{upid}-{api}"] === Put datafeed API diff --git a/docs/java-rest/high-level/ml/put-filter.asciidoc b/docs/java-rest/high-level/ml/put-filter.asciidoc index 2582e7715ab..41aedfc8213 100644 --- a/docs/java-rest/high-level/ml/put-filter.asciidoc +++ b/docs/java-rest/high-level/ml/put-filter.asciidoc @@ -3,6 +3,7 @@ :request: PutFilterRequest :response: PutFilterResponse -- +[role="xpack"] [id="{upid}-{api}"] === Put Filter API diff --git a/docs/java-rest/high-level/ml/put-job.asciidoc b/docs/java-rest/high-level/ml/put-job.asciidoc index 081c94782fa..1a4912ea589 100644 --- a/docs/java-rest/high-level/ml/put-job.asciidoc +++ b/docs/java-rest/high-level/ml/put-job.asciidoc @@ -3,6 +3,7 @@ :request: PutJobRequest :response: PutJobResponse -- +[role="xpack"] [id="{upid}-{api}"] === Put {anomaly-job} API diff --git a/docs/java-rest/high-level/ml/revert-model-snapshot.asciidoc b/docs/java-rest/high-level/ml/revert-model-snapshot.asciidoc index 7c45ce8ebf0..f347efa7340 100644 --- a/docs/java-rest/high-level/ml/revert-model-snapshot.asciidoc +++ b/docs/java-rest/high-level/ml/revert-model-snapshot.asciidoc @@ -3,6 +3,8 @@ :request: RevertModelSnapshotRequest :response: RevertModelSnapshotResponse -- +[role="xpack"] + [id="{upid}-{api}"] === Revert Model Snapshot API diff --git a/docs/java-rest/high-level/ml/set-upgrade-mode.asciidoc b/docs/java-rest/high-level/ml/set-upgrade-mode.asciidoc index 80bb1874e4a..a869d64afed 100644 --- a/docs/java-rest/high-level/ml/set-upgrade-mode.asciidoc +++ b/docs/java-rest/high-level/ml/set-upgrade-mode.asciidoc @@ -3,6 +3,7 @@ :request: SetUpgradeModeRequest :response: AcknowledgedResponse -- +[role="xpack"] [id="{upid}-{api}"] === Set Upgrade Mode API diff --git a/docs/java-rest/high-level/ml/start-data-frame-analytics.asciidoc b/docs/java-rest/high-level/ml/start-data-frame-analytics.asciidoc index 610607daba1..27f9b22b7e5 100644 --- a/docs/java-rest/high-level/ml/start-data-frame-analytics.asciidoc +++ b/docs/java-rest/high-level/ml/start-data-frame-analytics.asciidoc @@ -3,26 +3,27 @@ :request: StartDataFrameAnalyticsRequest :response: AcknowledgedResponse -- +[role="xpack"] [id="{upid}-{api}"] -=== Start Data Frame Analytics API +=== Start {dfanalytics-jobs} API -The Start Data Frame Analytics API is used to start an existing {dataframe-analytics-config}. +Starts an existing {dfanalytics-job}. It accepts a +{request}+ object and responds with a +{response}+ object. [id="{upid}-{api}-request"] -==== Start Data Frame Analytics Request +==== Start {dfanalytics-job} request -A +{request}+ object requires a {dataframe-analytics-config} id. +A +{request}+ object requires a {dfanalytics-job} ID. 
["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- include-tagged::{doc-tests-file}[{api}-request] --------------------------------------------------- -<1> Constructing a new start request referencing an existing {dataframe-analytics-config} +<1> Constructing a new start request referencing an existing {dfanalytics-job} include::../execution.asciidoc[] [id="{upid}-{api}-response"] ==== Response -The returned +{response}+ object acknowledges the {dataframe-job} has started. \ No newline at end of file +The returned +{response}+ object acknowledges the {dfanalytics-job} has started. \ No newline at end of file diff --git a/docs/java-rest/high-level/ml/start-datafeed.asciidoc b/docs/java-rest/high-level/ml/start-datafeed.asciidoc index 821b404b0a5..84eff67380d 100644 --- a/docs/java-rest/high-level/ml/start-datafeed.asciidoc +++ b/docs/java-rest/high-level/ml/start-datafeed.asciidoc @@ -3,6 +3,7 @@ :request: StartDatafeedRequest :response: StartDatafeedResponse -- +[role="xpack"] [id="{upid}-{api}"] === Start datafeed API diff --git a/docs/java-rest/high-level/ml/stop-data-frame-analytics.asciidoc b/docs/java-rest/high-level/ml/stop-data-frame-analytics.asciidoc index 3a06f268836..47a2fc26241 100644 --- a/docs/java-rest/high-level/ml/stop-data-frame-analytics.asciidoc +++ b/docs/java-rest/high-level/ml/stop-data-frame-analytics.asciidoc @@ -3,22 +3,23 @@ :request: StopDataFrameAnalyticsRequest :response: StopDataFrameAnalyticsResponse -- +[role="xpack"] [id="{upid}-{api}"] -=== Stop Data Frame Analytics API +=== Stop {dfanalytics-jobs} API -The Stop Data Frame Analytics API is used to stop a running {dataframe-analytics-config}. +Stops a running {dfanalytics-job}. It accepts a +{request}+ object and responds with a +{response}+ object. [id="{upid}-{api}-request"] -==== Stop Data Frame Analytics Request +==== Stop {dfanalytics-jobs} request -A +{request}+ object requires a {dataframe-analytics-config} id. +A +{request}+ object requires a {dfanalytics-job} ID. ["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- include-tagged::{doc-tests-file}[{api}-request] --------------------------------------------------- -<1> Constructing a new stop request referencing an existing {dataframe-analytics-config} +<1> Constructing a new stop request referencing an existing {dfanalytics-job} <2> Optionally used to stop a failed task include::../execution.asciidoc[] @@ -26,4 +27,4 @@ include::../execution.asciidoc[] [id="{upid}-{api}-response"] ==== Response -The returned +{response}+ object acknowledges the {dataframe-job} has stopped. \ No newline at end of file +The returned +{response}+ object acknowledges the {dfanalytics-job} has stopped. 
\ No newline at end of file diff --git a/docs/java-rest/high-level/ml/stop-datafeed.asciidoc b/docs/java-rest/high-level/ml/stop-datafeed.asciidoc index 211d1c5ad7a..08958273b18 100644 --- a/docs/java-rest/high-level/ml/stop-datafeed.asciidoc +++ b/docs/java-rest/high-level/ml/stop-datafeed.asciidoc @@ -3,6 +3,7 @@ :request: StopDatafeedRequest :response: StopDatafeedResponse -- +[role="xpack"] [id="{upid}-{api}"] === Stop Datafeed API diff --git a/docs/java-rest/high-level/ml/update-datafeed.asciidoc b/docs/java-rest/high-level/ml/update-datafeed.asciidoc index c27efdb7d18..784f23f7aca 100644 --- a/docs/java-rest/high-level/ml/update-datafeed.asciidoc +++ b/docs/java-rest/high-level/ml/update-datafeed.asciidoc @@ -3,6 +3,7 @@ :request: UpdateDatafeedRequest :response: PutDatafeedResponse -- +[role="xpack"] [id="{upid}-{api}"] === Update datafeed API diff --git a/docs/java-rest/high-level/ml/update-filter.asciidoc b/docs/java-rest/high-level/ml/update-filter.asciidoc index 516ab597b6c..c1b82edfb72 100644 --- a/docs/java-rest/high-level/ml/update-filter.asciidoc +++ b/docs/java-rest/high-level/ml/update-filter.asciidoc @@ -3,6 +3,7 @@ :request: UpdateFilterRequest :response: PutFilterResponse -- +[role="xpack"] [id="{upid}-{api}"] === Update filter API diff --git a/docs/java-rest/high-level/ml/update-job.asciidoc b/docs/java-rest/high-level/ml/update-job.asciidoc index d9d47acaab7..b1531dfa1c3 100644 --- a/docs/java-rest/high-level/ml/update-job.asciidoc +++ b/docs/java-rest/high-level/ml/update-job.asciidoc @@ -3,6 +3,7 @@ :request: UpdateJobRequest :response: PutJobResponse -- +[role="xpack"] [id="{upid}-{api}"] === Update {anomaly-job} API diff --git a/docs/java-rest/high-level/ml/update-model-snapshot.asciidoc b/docs/java-rest/high-level/ml/update-model-snapshot.asciidoc index ec845b22137..99053efd3a2 100644 --- a/docs/java-rest/high-level/ml/update-model-snapshot.asciidoc +++ b/docs/java-rest/high-level/ml/update-model-snapshot.asciidoc @@ -3,6 +3,7 @@ :request: UpdateModelSnapshotRequest :response: UpdateModelSnapshotResponse -- +[role="xpack"] [id="{upid}-{api}"] === Update model snapshot API diff --git a/docs/java-rest/high-level/rollup/delete_job.asciidoc b/docs/java-rest/high-level/rollup/delete_job.asciidoc index c98a6fb7326..930713331ae 100644 --- a/docs/java-rest/high-level/rollup/delete_job.asciidoc +++ b/docs/java-rest/high-level/rollup/delete_job.asciidoc @@ -3,7 +3,7 @@ :request: DeleteRollupJobRequest :response: DeleteRollupJobResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Delete Rollup Job API diff --git a/docs/java-rest/high-level/rollup/get_job.asciidoc b/docs/java-rest/high-level/rollup/get_job.asciidoc index 68733113e53..ce6ca9feb30 100644 --- a/docs/java-rest/high-level/rollup/get_job.asciidoc +++ b/docs/java-rest/high-level/rollup/get_job.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[java-rest-high-x-pack-rollup-get-job]] === Get Rollup Job API diff --git a/docs/java-rest/high-level/rollup/get_rollup_caps.asciidoc b/docs/java-rest/high-level/rollup/get_rollup_caps.asciidoc index c11f5d231b0..cc320558d4f 100644 --- a/docs/java-rest/high-level/rollup/get_rollup_caps.asciidoc +++ b/docs/java-rest/high-level/rollup/get_rollup_caps.asciidoc @@ -3,7 +3,7 @@ :request: GetRollupCapsRequest :response: GetRollupCapsResponse -- - +[role="xpack"] [id="{upid}-x-pack-{api}"] === Get Rollup Capabilities API diff --git a/docs/java-rest/high-level/rollup/get_rollup_index_caps.asciidoc b/docs/java-rest/high-level/rollup/get_rollup_index_caps.asciidoc index 
52cb7ff9524..bd69a5cd55f 100644 --- a/docs/java-rest/high-level/rollup/get_rollup_index_caps.asciidoc +++ b/docs/java-rest/high-level/rollup/get_rollup_index_caps.asciidoc @@ -3,7 +3,7 @@ :request: GetRollupIndexCapsRequest :response: GetRollupIndexCapsResponse -- - +[role="xpack"] [id="{upid}-x-pack-{api}"] === Get Rollup Index Capabilities API diff --git a/docs/java-rest/high-level/rollup/put_job.asciidoc b/docs/java-rest/high-level/rollup/put_job.asciidoc index 9a83f6022ec..5e763616919 100644 --- a/docs/java-rest/high-level/rollup/put_job.asciidoc +++ b/docs/java-rest/high-level/rollup/put_job.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[java-rest-high-x-pack-rollup-put-job]] === Put Rollup Job API diff --git a/docs/java-rest/high-level/rollup/search.asciidoc b/docs/java-rest/high-level/rollup/search.asciidoc index 49bf983edd4..6139cd7238d 100644 --- a/docs/java-rest/high-level/rollup/search.asciidoc +++ b/docs/java-rest/high-level/rollup/search.asciidoc @@ -3,7 +3,7 @@ :request: SearchRequest :response: SearchResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Rollup Search API diff --git a/docs/java-rest/high-level/rollup/start_job.asciidoc b/docs/java-rest/high-level/rollup/start_job.asciidoc index 6d760dc0b33..8cc94a234bd 100644 --- a/docs/java-rest/high-level/rollup/start_job.asciidoc +++ b/docs/java-rest/high-level/rollup/start_job.asciidoc @@ -3,7 +3,7 @@ :request: StartRollupJobRequest :response: StartRollupJobResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Start Rollup Job API diff --git a/docs/java-rest/high-level/rollup/stop_job.asciidoc b/docs/java-rest/high-level/rollup/stop_job.asciidoc index cba1dcdd2d3..56de078d45d 100644 --- a/docs/java-rest/high-level/rollup/stop_job.asciidoc +++ b/docs/java-rest/high-level/rollup/stop_job.asciidoc @@ -3,7 +3,7 @@ :request: StopRollupJobRequest :response: StopRollupJobResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Stop Rollup Job API diff --git a/docs/java-rest/high-level/security/authenticate.asciidoc b/docs/java-rest/high-level/security/authenticate.asciidoc index 4d4467a03b4..8f2a91a9ca5 100644 --- a/docs/java-rest/high-level/security/authenticate.asciidoc +++ b/docs/java-rest/high-level/security/authenticate.asciidoc @@ -3,7 +3,7 @@ :api: authenticate :response: AuthenticateResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Authenticate API diff --git a/docs/java-rest/high-level/security/change-password.asciidoc b/docs/java-rest/high-level/security/change-password.asciidoc index 36d66b194cf..6593e810598 100644 --- a/docs/java-rest/high-level/security/change-password.asciidoc +++ b/docs/java-rest/high-level/security/change-password.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[java-rest-high-security-change-password]] === Change Password API diff --git a/docs/java-rest/high-level/security/clear-realm-cache.asciidoc b/docs/java-rest/high-level/security/clear-realm-cache.asciidoc index 5427db148d6..41c100e1ec8 100644 --- a/docs/java-rest/high-level/security/clear-realm-cache.asciidoc +++ b/docs/java-rest/high-level/security/clear-realm-cache.asciidoc @@ -4,7 +4,7 @@ :request: ClearRealmCacheRequest :response: ClearRealmCacheResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Clear Realm Cache API diff --git a/docs/java-rest/high-level/security/clear-roles-cache.asciidoc b/docs/java-rest/high-level/security/clear-roles-cache.asciidoc index 851824bab5f..39e344f6ce9 100644 --- a/docs/java-rest/high-level/security/clear-roles-cache.asciidoc +++ b/docs/java-rest/high-level/security/clear-roles-cache.asciidoc @@ -4,7 +4,7 @@ 
:request: ClearRolesCacheRequest :response: ClearRolesCacheResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Clear Roles Cache API diff --git a/docs/java-rest/high-level/security/create-api-key.asciidoc b/docs/java-rest/high-level/security/create-api-key.asciidoc index 93c3fa16de1..8a77f11484d 100644 --- a/docs/java-rest/high-level/security/create-api-key.asciidoc +++ b/docs/java-rest/high-level/security/create-api-key.asciidoc @@ -3,7 +3,7 @@ :request: CreateApiKeyRequest :response: CreateApiKeyResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Create API Key API diff --git a/docs/java-rest/high-level/security/create-token.asciidoc b/docs/java-rest/high-level/security/create-token.asciidoc index 33e55d4ed58..d911c747a13 100644 --- a/docs/java-rest/high-level/security/create-token.asciidoc +++ b/docs/java-rest/high-level/security/create-token.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[java-rest-high-security-create-token]] === Create Token API diff --git a/docs/java-rest/high-level/security/delegate-pki-authentication.asciidoc b/docs/java-rest/high-level/security/delegate-pki-authentication.asciidoc index 9cb667c24dc..ca3f832f405 100644 --- a/docs/java-rest/high-level/security/delegate-pki-authentication.asciidoc +++ b/docs/java-rest/high-level/security/delegate-pki-authentication.asciidoc @@ -3,7 +3,7 @@ :request: DelegatePkiAuthenticationRequest :response: DelegatePkiAuthenticationResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Delegate PKI Authentication API diff --git a/docs/java-rest/high-level/security/delete-privileges.asciidoc b/docs/java-rest/high-level/security/delete-privileges.asciidoc index 7f32d75107b..827ccf5b1e5 100644 --- a/docs/java-rest/high-level/security/delete-privileges.asciidoc +++ b/docs/java-rest/high-level/security/delete-privileges.asciidoc @@ -3,7 +3,7 @@ :request: DeletePrivilegesRequest :response: DeletePrivilegesResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Delete Privileges API diff --git a/docs/java-rest/high-level/security/delete-role-mapping.asciidoc b/docs/java-rest/high-level/security/delete-role-mapping.asciidoc index 63025e9d681..5279d953688 100644 --- a/docs/java-rest/high-level/security/delete-role-mapping.asciidoc +++ b/docs/java-rest/high-level/security/delete-role-mapping.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[java-rest-high-security-delete-role-mapping]] === Delete Role Mapping API diff --git a/docs/java-rest/high-level/security/delete-role.asciidoc b/docs/java-rest/high-level/security/delete-role.asciidoc index 0086b89bb68..d2f4ef6f88a 100644 --- a/docs/java-rest/high-level/security/delete-role.asciidoc +++ b/docs/java-rest/high-level/security/delete-role.asciidoc @@ -3,7 +3,7 @@ :request: DeleteRoleRequest :response: DeleteRoleResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Delete Role API diff --git a/docs/java-rest/high-level/security/delete-user.asciidoc b/docs/java-rest/high-level/security/delete-user.asciidoc index 52573bb29c7..43d65fc4e97 100644 --- a/docs/java-rest/high-level/security/delete-user.asciidoc +++ b/docs/java-rest/high-level/security/delete-user.asciidoc @@ -3,7 +3,7 @@ :request: DeleteUserRequest :response: DeleteUserResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Delete User API diff --git a/docs/java-rest/high-level/security/disable-user.asciidoc b/docs/java-rest/high-level/security/disable-user.asciidoc index 564b8699ebb..90b89c2779f 100644 --- a/docs/java-rest/high-level/security/disable-user.asciidoc +++ b/docs/java-rest/high-level/security/disable-user.asciidoc @@ -1,3 +1,4 @@ 
+[role="xpack"] [[java-rest-high-security-disable-user]] === Disable User API diff --git a/docs/java-rest/high-level/security/enable-user.asciidoc b/docs/java-rest/high-level/security/enable-user.asciidoc index 4be0f38e39f..7e8bac12e27 100644 --- a/docs/java-rest/high-level/security/enable-user.asciidoc +++ b/docs/java-rest/high-level/security/enable-user.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[java-rest-high-security-enable-user]] === Enable User API diff --git a/docs/java-rest/high-level/security/get-api-key.asciidoc b/docs/java-rest/high-level/security/get-api-key.asciidoc index 911acd3e92e..8a480df34f1 100644 --- a/docs/java-rest/high-level/security/get-api-key.asciidoc +++ b/docs/java-rest/high-level/security/get-api-key.asciidoc @@ -3,7 +3,7 @@ :request: GetApiKeyRequest :response: GetApiKeyResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Get API Key information API diff --git a/docs/java-rest/high-level/security/get-builtin-privileges.asciidoc b/docs/java-rest/high-level/security/get-builtin-privileges.asciidoc index e8eeb7b3c94..8a79d20f39b 100644 --- a/docs/java-rest/high-level/security/get-builtin-privileges.asciidoc +++ b/docs/java-rest/high-level/security/get-builtin-privileges.asciidoc @@ -3,7 +3,7 @@ :request: GetBuiltinPrivilegesRequest :response: GetBuiltinPrivilegesResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Get Builtin Privileges API diff --git a/docs/java-rest/high-level/security/get-certificates.asciidoc b/docs/java-rest/high-level/security/get-certificates.asciidoc index 2f46cfc927a..5ada3c8a712 100644 --- a/docs/java-rest/high-level/security/get-certificates.asciidoc +++ b/docs/java-rest/high-level/security/get-certificates.asciidoc @@ -4,7 +4,7 @@ :response: GetSslCertificatesResponse -- - +[role="xpack"] [id="{upid}-{api}"] === SSL Certificate API diff --git a/docs/java-rest/high-level/security/get-privileges.asciidoc b/docs/java-rest/high-level/security/get-privileges.asciidoc index 6eee8bbc3c1..d63f4774d07 100644 --- a/docs/java-rest/high-level/security/get-privileges.asciidoc +++ b/docs/java-rest/high-level/security/get-privileges.asciidoc @@ -4,7 +4,7 @@ :request: GetPrivilegesRequest :response: GetPrivilegesResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Get Privileges API diff --git a/docs/java-rest/high-level/security/get-role-mappings.asciidoc b/docs/java-rest/high-level/security/get-role-mappings.asciidoc index cc58d0980c3..b279702a4e1 100644 --- a/docs/java-rest/high-level/security/get-role-mappings.asciidoc +++ b/docs/java-rest/high-level/security/get-role-mappings.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[java-rest-high-security-get-role-mappings]] === Get Role Mappings API diff --git a/docs/java-rest/high-level/security/get-roles.asciidoc b/docs/java-rest/high-level/security/get-roles.asciidoc index 77734922299..2c698222c7a 100644 --- a/docs/java-rest/high-level/security/get-roles.asciidoc +++ b/docs/java-rest/high-level/security/get-roles.asciidoc @@ -4,7 +4,7 @@ :request: GetRolesRequest :response: GetRolesResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Get Roles API diff --git a/docs/java-rest/high-level/security/get-user-privileges.asciidoc b/docs/java-rest/high-level/security/get-user-privileges.asciidoc index 641d238df64..b8051cbfae6 100644 --- a/docs/java-rest/high-level/security/get-user-privileges.asciidoc +++ b/docs/java-rest/high-level/security/get-user-privileges.asciidoc @@ -3,7 +3,7 @@ :request: GetUserPrivilegesRequest :response: GetUserPrivilegesResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Get User 
Privileges API diff --git a/docs/java-rest/high-level/security/get-users.asciidoc b/docs/java-rest/high-level/security/get-users.asciidoc index 1d41bd76166..cbd45801fe9 100644 --- a/docs/java-rest/high-level/security/get-users.asciidoc +++ b/docs/java-rest/high-level/security/get-users.asciidoc @@ -4,7 +4,7 @@ :request: GetUsersRequest :response: GetUsersResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Get Users API diff --git a/docs/java-rest/high-level/security/has-privileges.asciidoc b/docs/java-rest/high-level/security/has-privileges.asciidoc index 181b1b7f481..7c5f09a171c 100644 --- a/docs/java-rest/high-level/security/has-privileges.asciidoc +++ b/docs/java-rest/high-level/security/has-privileges.asciidoc @@ -3,7 +3,7 @@ :request: HasPrivilegesRequest :response: HasPrivilegesResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Has Privileges API diff --git a/docs/java-rest/high-level/security/invalidate-api-key.asciidoc b/docs/java-rest/high-level/security/invalidate-api-key.asciidoc index b8a99f932d9..d1f747da882 100644 --- a/docs/java-rest/high-level/security/invalidate-api-key.asciidoc +++ b/docs/java-rest/high-level/security/invalidate-api-key.asciidoc @@ -3,7 +3,7 @@ :request: InvalidateApiKeyRequest :response: InvalidateApiKeyResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Invalidate API Key API diff --git a/docs/java-rest/high-level/security/invalidate-token.asciidoc b/docs/java-rest/high-level/security/invalidate-token.asciidoc index 76d4909ff04..34969523c7b 100644 --- a/docs/java-rest/high-level/security/invalidate-token.asciidoc +++ b/docs/java-rest/high-level/security/invalidate-token.asciidoc @@ -3,7 +3,7 @@ :request: InvalidateTokenRequest :response: InvalidateTokenResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Invalidate Token API diff --git a/docs/java-rest/high-level/security/put-privileges.asciidoc b/docs/java-rest/high-level/security/put-privileges.asciidoc index 1c0a97d2a94..ba8d8878e15 100644 --- a/docs/java-rest/high-level/security/put-privileges.asciidoc +++ b/docs/java-rest/high-level/security/put-privileges.asciidoc @@ -3,7 +3,7 @@ :request: PutPrivilegesRequest :response: PutPrivilegesResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Put Privileges API diff --git a/docs/java-rest/high-level/security/put-role-mapping.asciidoc b/docs/java-rest/high-level/security/put-role-mapping.asciidoc index f71c7648803..819aa776b68 100644 --- a/docs/java-rest/high-level/security/put-role-mapping.asciidoc +++ b/docs/java-rest/high-level/security/put-role-mapping.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[java-rest-high-security-put-role-mapping]] === Put Role Mapping API diff --git a/docs/java-rest/high-level/security/put-role.asciidoc b/docs/java-rest/high-level/security/put-role.asciidoc index 68c1f5d69d4..d418375237d 100644 --- a/docs/java-rest/high-level/security/put-role.asciidoc +++ b/docs/java-rest/high-level/security/put-role.asciidoc @@ -4,7 +4,7 @@ :request: PutRoleRequest :response: PutRoleResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Put Role API diff --git a/docs/java-rest/high-level/security/put-user.asciidoc b/docs/java-rest/high-level/security/put-user.asciidoc index 714dd61e119..bca93244175 100644 --- a/docs/java-rest/high-level/security/put-user.asciidoc +++ b/docs/java-rest/high-level/security/put-user.asciidoc @@ -3,7 +3,7 @@ :request: PutUserRequest :response: PutUserResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Put User API diff --git a/docs/java-rest/high-level/supported-apis.asciidoc 
b/docs/java-rest/high-level/supported-apis.asciidoc index 49c0cf050c9..10da73805d2 100644 --- a/docs/java-rest/high-level/supported-apis.asciidoc +++ b/docs/java-rest/high-level/supported-apis.asciidoc @@ -250,6 +250,7 @@ include::licensing/start-basic.asciidoc[] include::licensing/get-trial-status.asciidoc[] include::licensing/get-basic-status.asciidoc[] +[role="xpack"] == Machine Learning APIs :upid: {mainid}-x-pack-ml :doc-tests-file: {doc-tests}/MlClientDocumentationIT.java @@ -371,6 +372,7 @@ The Java High Level REST Client supports the following Migration APIs: include::migration/get-deprecation-info.asciidoc[] +[role="xpack"] == Rollup APIs :upid: {mainid}-rollup @@ -396,6 +398,7 @@ include::rollup/search.asciidoc[] include::rollup/get_rollup_caps.asciidoc[] include::rollup/get_rollup_index_caps.asciidoc[] +[role="xpack"] == Security APIs :upid: {mainid}-security @@ -459,6 +462,7 @@ include::security/create-api-key.asciidoc[] include::security/get-api-key.asciidoc[] include::security/invalidate-api-key.asciidoc[] +[role="xpack"] == Watcher APIs :upid: {mainid}-watcher @@ -488,6 +492,7 @@ include::watcher/activate-watch.asciidoc[] include::watcher/execute-watch.asciidoc[] include::watcher/watcher-stats.asciidoc[] +[role="xpack"] == Graph APIs The Java High Level REST Client supports the following Graph APIs: @@ -508,6 +513,7 @@ don't leak into the rest of the documentation. :upid!: -- +[role="xpack"] == CCR APIs :upid: {mainid}-ccr @@ -539,6 +545,7 @@ include::ccr/get_stats.asciidoc[] include::ccr/get_follow_stats.asciidoc[] include::ccr/get_follow_info.asciidoc[] +[role="xpack"] == Index Lifecycle Management APIs :upid: {mainid}-ilm @@ -568,32 +575,33 @@ include::ilm/lifecycle_management_status.asciidoc[] include::ilm/retry_lifecycle_policy.asciidoc[] include::ilm/remove_lifecycle_policy_from_index.asciidoc[] -[[_data_frame_transform_apis]] -== {dataframe-transform-cap} APIs +[role="xpack"] +[[transform_apis]] +== {transform-cap} APIs -:upid: {mainid}-dataframe +:upid: {mainid} :doc-tests-file: {doc-tests}/DataFrameTransformDocumentationIT.java -The Java High Level REST Client supports the following {dataframe-transform} +The Java High Level REST Client supports the following {transform} APIs: -* <<{upid}-get-data-frame-transform>> -* <<{upid}-get-data-frame-transform-stats>> -* <<{upid}-put-data-frame-transform>> -* <<{upid}-update-data-frame-transform>> -* <<{upid}-delete-data-frame-transform>> -* <<{upid}-preview-data-frame-transform>> -* <<{upid}-start-data-frame-transform>> -* <<{upid}-stop-data-frame-transform>> +* <<{upid}-get-transform>> +* <<{upid}-get-transform-stats>> +* <<{upid}-put-transform>> +* <<{upid}-update-transform>> +* <<{upid}-delete-transform>> +* <<{upid}-preview-transform>> +* <<{upid}-start-transform>> +* <<{upid}-stop-transform>> -include::dataframe/get_data_frame.asciidoc[] -include::dataframe/get_data_frame_stats.asciidoc[] -include::dataframe/put_data_frame.asciidoc[] -include::dataframe/update_data_frame.asciidoc[] -include::dataframe/delete_data_frame.asciidoc[] -include::dataframe/preview_data_frame.asciidoc[] -include::dataframe/start_data_frame.asciidoc[] -include::dataframe/stop_data_frame.asciidoc[] +include::transform/get_transform.asciidoc[] +include::transform/get_transform_stats.asciidoc[] +include::transform/put_transform.asciidoc[] +include::transform/update_transform.asciidoc[] +include::transform/delete_transform.asciidoc[] +include::transform/preview_transform.asciidoc[] +include::transform/start_transform.asciidoc[] 
+include::transform/stop_transform.asciidoc[] == Enrich APIs diff --git a/docs/java-rest/high-level/dataframe/delete_data_frame.asciidoc b/docs/java-rest/high-level/transform/delete_transform.asciidoc similarity index 53% rename from docs/java-rest/high-level/dataframe/delete_data_frame.asciidoc rename to docs/java-rest/high-level/transform/delete_transform.asciidoc index 5f04aa03718..31a86dd80b4 100644 --- a/docs/java-rest/high-level/dataframe/delete_data_frame.asciidoc +++ b/docs/java-rest/high-level/transform/delete_transform.asciidoc @@ -1,15 +1,16 @@ -- -:api: delete-data-frame-transform +:api: delete-transform :request: DeleteDataFrameTransformRequest :response: AcknowledgedResponse -- +[role="xpack"] [id="{upid}-{api}"] -=== Delete {dataframe-transform} API +=== Delete {transform} API -Deletes an existing {dataframe-transform}. +Deletes an existing {transform}. [id="{upid}-{api}-request"] -==== Delete {dataframe-transform} request +==== Delete {transform} request A +{request}+ object requires a non-null `id`. @@ -17,14 +18,14 @@ A +{request}+ object requires a non-null `id`. --------------------------------------------------- include-tagged::{doc-tests-file}[{api}-request] --------------------------------------------------- -<1> Constructing a new request referencing an existing {dataframe-transform} -<2> Sets the optional argument `force`. When `true`, the {dataframe-transform} +<1> Constructing a new request referencing an existing {transform} +<2> Sets the optional argument `force`. When `true`, the {transform} is deleted regardless of its current state. The default value is `false`, -meaning that only `stopped` {dataframe-transforms} can be deleted. +meaning that only `stopped` {transforms} can be deleted. include::../execution.asciidoc[] [id="{upid}-{api}-response"] ==== Response -The returned +{response}+ object acknowledges the Data Frame Transform deletion. +The returned +{response}+ object acknowledges the {transform} deletion. diff --git a/docs/java-rest/high-level/dataframe/get_data_frame.asciidoc b/docs/java-rest/high-level/transform/get_transform.asciidoc similarity index 59% rename from docs/java-rest/high-level/dataframe/get_data_frame.asciidoc rename to docs/java-rest/high-level/transform/get_transform.asciidoc index 160dc378e72..f9c8c4a0980 100644 --- a/docs/java-rest/high-level/dataframe/get_data_frame.asciidoc +++ b/docs/java-rest/high-level/transform/get_transform.asciidoc @@ -1,25 +1,26 @@ -- -:api: get-data-frame-transform +:api: get-transform :request: GetDataFrameTransformRequest :response: GetDataFrameTransformResponse -- +[role="xpack"] [id="{upid}-{api}"] -=== Get {dataframe-transform} API +=== Get {transform} API -Retrieves configuration information about one or more {dataframe-transforms}. +Retrieves configuration information about one or more {transforms}. The API accepts a +{request}+ object and returns a +{response}+. [id="{upid}-{api}-request"] -==== Get {dataframe-transform} request +==== Get {transform} request -A +{request}+ requires either a {dataframe-transform} ID, a comma separated list -of ids or the special wildcard `_all` to get all {dataframe-transforms}. +A +{request}+ requires either a {transform} ID, a comma separated list +of ids or the special wildcard `_all` to get all {transforms}. 
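Even after the rename, the request and response classes on the Java side keep their `DataFrameTransform` names, which is worth keeping in mind when reading these pages. A sketch of the get {transform} call described above, assuming an existing `client`; the `client.dataFrame()` entry point and the method name reflect the pre-rename client and are assumptions that may differ between versions:

["source","java"]
--------------------------------------------------
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.dataframe.GetDataFrameTransformRequest;
import org.elasticsearch.client.dataframe.GetDataFrameTransformResponse;

// A single ID is used here; a comma-separated list or the `_all` wildcard
// works the same way.
GetDataFrameTransformRequest request = new GetDataFrameTransformRequest("my-transform");

GetDataFrameTransformResponse response =
    client.dataFrame().getDataFrameTransform(request, RequestOptions.DEFAULT);
--------------------------------------------------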
["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- include-tagged::{doc-tests-file}[{api}-request] -------------------------------------------------- -<1> Constructing a new GET request referencing an existing {dataframe-transform} +<1> Constructing a new GET request referencing an existing {transform} ==== Optional arguments @@ -30,9 +31,9 @@ The following arguments are optional. include-tagged::{doc-tests-file}[{api}-request-options] -------------------------------------------------- <1> The page parameters `from` and `size`. `from` specifies the number of -{dataframe-transforms} to skip. `size` specifies the maximum number of -{dataframe-transforms} to get. Defaults to `0` and `100` respectively. -<2> Whether to ignore if a wildcard expression matches no transforms. +{transforms} to skip. `size` specifies the maximum number of +{transforms} to get. Defaults to `0` and `100` respectively. +<2> Whether to ignore if a wildcard expression matches no {transforms}. include::../execution.asciidoc[] @@ -40,7 +41,7 @@ include::../execution.asciidoc[] [id="{upid}-{api}-response"] ==== Response -The returned +{response}+ contains the requested {dataframe-transforms}. +The returned +{response}+ contains the requested {transforms}. ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- diff --git a/docs/java-rest/high-level/dataframe/get_data_frame_stats.asciidoc b/docs/java-rest/high-level/transform/get_transform_stats.asciidoc similarity index 60% rename from docs/java-rest/high-level/dataframe/get_data_frame_stats.asciidoc rename to docs/java-rest/high-level/transform/get_transform_stats.asciidoc index 76223e61c1d..7674dab7b0a 100644 --- a/docs/java-rest/high-level/dataframe/get_data_frame_stats.asciidoc +++ b/docs/java-rest/high-level/transform/get_transform_stats.asciidoc @@ -1,25 +1,26 @@ -- -:api: get-data-frame-transform-stats +:api: get-transform-stats :request: GetDataFrameTransformStatsRequest :response: GetDataFrameTransformStatsResponse -- +[role="xpack"] [id="{upid}-{api}"] -=== Get {dataframe-transform} stats API +=== Get {transform} stats API -Retrieves the operational statistics of one or more {dataframe-transforms}. +Retrieves the operational statistics of one or more {transforms}. The API accepts a +{request}+ object and returns a +{response}+. [id="{upid}-{api}-request"] -==== Get {dataframe-transform} stats request +==== Get {transform} stats request -A +{request}+ requires a data frame transform id or the special wildcard `_all` -to get the statistics for all {dataframe-transforms}. +A +{request}+ requires a {transform} id or the special wildcard `_all` +to get the statistics for all {transforms}. ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- include-tagged::{doc-tests-file}[{api}-request] -------------------------------------------------- -<1> Constructing a new GET Stats request referencing an existing {dataframe-transform} +<1> Constructing a new GET Stats request referencing an existing {transform} ==== Optional arguments @@ -30,10 +31,10 @@ The following arguments are optional. include-tagged::{doc-tests-file}[{api}-request-options] -------------------------------------------------- <1> The page parameters `from` and `size`. `from` specifies the number of -{dataframe-transform} stats to skip. -`size` specifies the maximum number of {dataframe-transform} stats to get. +{transform} stats to skip. 
+`size` specifies the maximum number of {transform} stats to get. Defaults to `0` and `100` respectively. -<2> Whether to ignore if a wildcard expression matches no transforms. +<2> Whether to ignore if a wildcard expression matches no {transforms}. include::../execution.asciidoc[] @@ -41,15 +42,15 @@ include::../execution.asciidoc[] [id="{upid}-{api}-response"] ==== Response -The returned +{response}+ contains the requested {dataframe-transform} statistics. +The returned +{response}+ contains the requested {transform} statistics. ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- include-tagged::{doc-tests-file}[{api}-response] -------------------------------------------------- <1> The response contains a list of `DataFrameTransformStats` objects -<2> The running state of the transform, for example `started`, `indexing`, etc. -<3> The overall transform statistics recording the number of documents indexed etc. -<4> The progress of the current run in the transform. Supplies the number of docs left until the next checkpoint +<2> The running state of the {transform}, for example `started`, `indexing`, etc. +<3> The overall {transform} statistics recording the number of documents indexed etc. +<4> The progress of the current run in the {transform}. Supplies the number of docs left until the next checkpoint and the total number of docs expected. <5> The assigned node information if the task is currently assigned to a node and running. diff --git a/docs/java-rest/high-level/dataframe/preview_data_frame.asciidoc b/docs/java-rest/high-level/transform/preview_transform.asciidoc similarity index 68% rename from docs/java-rest/high-level/dataframe/preview_data_frame.asciidoc rename to docs/java-rest/high-level/transform/preview_transform.asciidoc index 26453e5d496..5de00f5891f 100644 --- a/docs/java-rest/high-level/dataframe/preview_data_frame.asciidoc +++ b/docs/java-rest/high-level/transform/preview_transform.asciidoc @@ -1,19 +1,20 @@ -- -:api: preview-data-frame-transform +:api: preview-transform :request: PreviewDataFrameTransformRequest :response: PreviewDataFrameTransformResponse -- +[role="xpack"] [id="{upid}-{api}"] -=== Preview {dataframe-transform} API +=== Preview {transform} API -Previews the results of a {dataframe-transform}. +Previews the results of a {transform}. The API accepts a +{request}+ object as a request and returns a +{response}+. [id="{upid}-{api}-request"] -==== Preview {dataframe-transform} request +==== Preview {transform} request -A +{request}+ takes a single argument: a valid {dataframe-transform} config. +A +{request}+ takes a single argument: a valid {transform} config. 
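Before the preview snippet below, one more sketch: the {transform} stats page just above follows the same ID-or-wildcard pattern as the configuration GET. The same caveats about the client entry point apply, and the class and method names are again assumptions:

["source","java"]
--------------------------------------------------
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.dataframe.GetDataFrameTransformStatsRequest;
import org.elasticsearch.client.dataframe.GetDataFrameTransformStatsResponse;

// Statistics for one transform; `_all` would return stats for every transform.
GetDataFrameTransformStatsRequest request =
    new GetDataFrameTransformStatsRequest("my-transform");

GetDataFrameTransformStatsResponse response =
    client.dataFrame().getDataFrameTransformStats(request, RequestOptions.DEFAULT);
--------------------------------------------------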
["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- @@ -21,7 +22,7 @@ include-tagged::{doc-tests-file}[{api}-request] -------------------------------------------------- <1> The source config from which the data should be gathered <2> The pivot config used to transform the data -<3> The configuration of the {dataframe-transform} to preview +<3> The configuration of the {transform} to preview include::../execution.asciidoc[] diff --git a/docs/java-rest/high-level/dataframe/put_data_frame.asciidoc b/docs/java-rest/high-level/transform/put_transform.asciidoc similarity index 83% rename from docs/java-rest/high-level/dataframe/put_data_frame.asciidoc rename to docs/java-rest/high-level/transform/put_transform.asciidoc index 2de25fde30e..b84ba1329cd 100644 --- a/docs/java-rest/high-level/dataframe/put_data_frame.asciidoc +++ b/docs/java-rest/high-level/transform/put_transform.asciidoc @@ -1,17 +1,18 @@ -- -:api: put-data-frame-transform +:api: put-transform :request: PutDataFrameTransformRequest :response: AcknowledgedResponse -- +[role="xpack"] [id="{upid}-{api}"] -=== Put {dataframe-transform} API +=== Put {transform} API -Creates a new {dataframe-transform}. +Creates a new {transform}. The API accepts a +{request}+ object as a request and returns a +{response}+. [id="{upid}-{api}-request"] -==== Put {dataframe-transform} request +==== Put {transform} request A +{request}+ requires the following argument: @@ -19,28 +20,28 @@ A +{request}+ requires the following argument: -------------------------------------------------- include-tagged::{doc-tests-file}[{api}-request] -------------------------------------------------- -<1> The configuration of the {dataframe-transform} to create +<1> The configuration of the {transform} to create <2> Whether or not to wait to run deferrable validations until `_start` is called. -This option should be used with care as the created {dataframe-transform} will run +This option should be used with care as the created {transform} will run with the privileges of the user creating it. Meaning, if they do not have privileges, such an error will not be visible until `_start` is called. [id="{upid}-{api}-config"] -==== {dataframe-transform-cap} configuration +==== {transform-cap} configuration The `DataFrameTransformConfig` object contains all the details about the -{dataframe-transform} configuration and contains the following arguments: +{transform} configuration and contains the following arguments: ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- include-tagged::{doc-tests-file}[{api}-config] -------------------------------------------------- -<1> The {dataframe-transform} ID +<1> The {transform} ID <2> The source indices and query from which to gather data <3> The destination index and optional pipeline <4> How often to check for updates to the source indices <5> The PivotConfig -<6> Optional free text description of the transform +<6> Optional free text description of the {transform} [id="{upid}-{api}-query-config"] @@ -83,8 +84,8 @@ include-tagged::{doc-tests-file}[{api}-pivot-config] -------------------------------------------------- <1> The `GroupConfig` to use in the pivot <2> The aggregations to use -<3> The maximum paging size for the transform when pulling data -from the source. The size dynamically adjusts as the transform +<3> The maximum paging size for the {transform} when pulling data +from the source. 
The size dynamically adjusts as the {transform} is running to recover from and prevent OOM issues. ===== GroupConfig @@ -120,4 +121,4 @@ include::../execution.asciidoc[] ==== Response The returned +{response}+ acknowledges the successful creation of -the new {dataframe-transform} or an error if the configuration is invalid. +the new {transform} or an error if the configuration is invalid. diff --git a/docs/java-rest/high-level/dataframe/start_data_frame.asciidoc b/docs/java-rest/high-level/transform/start_transform.asciidoc similarity index 72% rename from docs/java-rest/high-level/dataframe/start_data_frame.asciidoc rename to docs/java-rest/high-level/transform/start_transform.asciidoc index b62f410180c..69aea67dc2b 100644 --- a/docs/java-rest/high-level/dataframe/start_data_frame.asciidoc +++ b/docs/java-rest/high-level/transform/start_transform.asciidoc @@ -1,16 +1,17 @@ -- -:api: start-data-frame-transform +:api: start-transform :request: StartDataFrameTransformRequest :response: StartDataFrameTransformResponse -- +[role="xpack"] [id="{upid}-{api}"] -=== Start {dataframe-transform} API +=== Start {transform} API -Starts a {dataframe-transform}. +Starts a {transform}. It accepts a +{request}+ object and responds with a +{response}+ object. [id="{upid}-{api}-request"] -==== Start {dataframe-transform} request +==== Start {transform} request A +{request}+ object requires a non-null `id`. @@ -19,7 +20,7 @@ A +{request}+ object requires a non-null `id`. include-tagged::{doc-tests-file}[{api}-request] --------------------------------------------------- <1> Constructing a new start request referencing an existing -{dataframe-transform} +{transform} ==== Optional arguments @@ -29,11 +30,11 @@ The following arguments are optional. -------------------------------------------------- include-tagged::{doc-tests-file}[{api}-request-options] -------------------------------------------------- -<1> Controls the amount of time to wait until the {dataframe-transform} starts. +<1> Controls the amount of time to wait until the {transform} starts. include::../execution.asciidoc[] ==== Response -The returned +{response}+ object acknowledges the {dataframe-transform} has +The returned +{response}+ object acknowledges the {transform} has started. \ No newline at end of file diff --git a/docs/java-rest/high-level/dataframe/stop_data_frame.asciidoc b/docs/java-rest/high-level/transform/stop_transform.asciidoc similarity index 67% rename from docs/java-rest/high-level/dataframe/stop_data_frame.asciidoc rename to docs/java-rest/high-level/transform/stop_transform.asciidoc index af364501d0d..36a1491615e 100644 --- a/docs/java-rest/high-level/dataframe/stop_data_frame.asciidoc +++ b/docs/java-rest/high-level/transform/stop_transform.asciidoc @@ -1,16 +1,17 @@ -- -:api: stop-data-frame-transform +:api: stop-transform :request: StopDataFrameTransformRequest :response: StopDataFrameTransformResponse -- +[role="xpack"] [id="{upid}-{api}"] -=== Stop {dataframe-transform} API +=== Stop {transform} API -Stops a started {dataframe-transform}. +Stops a started {transform}. It accepts a +{request}+ object and responds with a +{response}+ object. [id="{upid}-{api}-request"] -==== Stop {dataframe-transform} request +==== Stop {transform} request A +{request}+ object requires a non-null `id`. `id` can be a comma separated list of IDs or a single ID. Wildcards, `*` and `_all` are also accepted. @@ -20,7 +21,7 @@ list of IDs or a single ID. Wildcards, `*` and `_all` are also accepted. 
--------------------------------------------------- include-tagged::{doc-tests-file}[{api}-request] --------------------------------------------------- -<1> Constructing a new stop request referencing an existing {dataframe-transform} +<1> Constructing a new stop request referencing an existing {transform} ==== Optional arguments @@ -31,11 +32,11 @@ The following arguments are optional. include-tagged::{doc-tests-file}[{api}-request-options] -------------------------------------------------- <1> If true wait for the data frame task to stop before responding -<2> Controls the amount of time to wait until the {dataframe-transform} stops. -<3> Whether to ignore if a wildcard expression matches no transforms. +<2> Controls the amount of time to wait until the {transform} stops. +<3> Whether to ignore if a wildcard expression matches no {transforms}. include::../execution.asciidoc[] ==== Response -The returned +{response}+ object acknowledges the {dataframe-transform} has stopped. \ No newline at end of file +The returned +{response}+ object acknowledges the {transform} has stopped. \ No newline at end of file diff --git a/docs/java-rest/high-level/dataframe/update_data_frame.asciidoc b/docs/java-rest/high-level/transform/update_transform.asciidoc similarity index 67% rename from docs/java-rest/high-level/dataframe/update_data_frame.asciidoc rename to docs/java-rest/high-level/transform/update_transform.asciidoc index d7aaefa192c..37f60eacf47 100644 --- a/docs/java-rest/high-level/dataframe/update_data_frame.asciidoc +++ b/docs/java-rest/high-level/transform/update_transform.asciidoc @@ -1,17 +1,18 @@ -- -:api: update-data-frame-transform +:api: update-transform :request: UpdateDataFrameTransformRequest :response: UpdateDataFrameTransformResponse -- +[role="xpack"] [id="{upid}-{api}"] -=== Update {dataframe-transform} API +=== Update {transform} API -Updates an existing {dataframe-transform}. +Updates an existing {transform}. The API accepts a +{request}+ object as a request and returns a +{response}+. [id="{upid}-{api}-request"] -==== Update {dataframe-transform} request +==== Update {transform} request A +{request}+ requires the following argument: @@ -19,18 +20,18 @@ A +{request}+ requires the following argument: -------------------------------------------------- include-tagged::{doc-tests-file}[{api}-request] -------------------------------------------------- -<1> The update configuration with which to update the {dataframe-transform}. +<1> The update configuration with which to update the {transform}. <2> The ID of the configuration to update. <3> Whether or not to wait to run deferrable validations until `_start` is called. -This option should be used with care as the created {dataframe-transform} will run +This option should be used with care as the created {transform} will run with the privileges of the user creating it. Meaning, if they do not have privileges, such an error will not be visible until `_start` is called. [id="{upid}-{api}-config"] -==== {dataframe-transform-cap} update configuration +==== {transform-cap} update configuration The `DataFrameTransformConfigUpdate` object contains all the details about updated -{dataframe-transform} configuration and contains the following arguments: +{transform} configuration and contains the following arguments: ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- @@ -39,13 +40,13 @@ include-tagged::{doc-tests-file}[{api}-config] <1> The source indices and query from which to gather data. 
<2> The destination index and optional pipeline. <3> How often to check for updates to the source indices. -<4> How to keep the {dataframe-transform} in sync with incoming data. -<5> Optional free text description of the transform. +<4> How to keep the {transform} in sync with incoming data. +<5> Optional free text description of the {transform}. include::../execution.asciidoc[] [id="{upid}-{api}-response"] ==== Response -The returned +{response}+ contains the updated {dataframe-transform} configuration +The returned +{response}+ contains the updated {transform} configuration or an error if the update failed or is invalid. diff --git a/docs/java-rest/high-level/watcher/ack-watch.asciidoc b/docs/java-rest/high-level/watcher/ack-watch.asciidoc index 3494993d87c..7bd28fa3a59 100644 --- a/docs/java-rest/high-level/watcher/ack-watch.asciidoc +++ b/docs/java-rest/high-level/watcher/ack-watch.asciidoc @@ -4,6 +4,7 @@ :response: AckWatchResponse -- +[role="xpack"] [id="{upid}-{api}"] === Ack watch API diff --git a/docs/java-rest/high-level/watcher/activate-watch.asciidoc b/docs/java-rest/high-level/watcher/activate-watch.asciidoc index 229e44e7a84..6cbe0344e34 100644 --- a/docs/java-rest/high-level/watcher/activate-watch.asciidoc +++ b/docs/java-rest/high-level/watcher/activate-watch.asciidoc @@ -3,7 +3,7 @@ :request: ActivateWatchRequest :response: ActivateWatchResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Activate watch API diff --git a/docs/java-rest/high-level/watcher/deactivate-watch.asciidoc b/docs/java-rest/high-level/watcher/deactivate-watch.asciidoc index 2ba82be1d6f..3594fda984e 100644 --- a/docs/java-rest/high-level/watcher/deactivate-watch.asciidoc +++ b/docs/java-rest/high-level/watcher/deactivate-watch.asciidoc @@ -4,6 +4,7 @@ :response: deactivateWatchResponse :doc-tests-file: {doc-tests}/WatcherDocumentationIT.java -- +[role="xpack"] [[java-rest-high-watcher-deactivate-watch]] === Deactivate watch API diff --git a/docs/java-rest/high-level/watcher/delete-watch.asciidoc b/docs/java-rest/high-level/watcher/delete-watch.asciidoc index 3edf2e83c9f..9e438bb16b5 100644 --- a/docs/java-rest/high-level/watcher/delete-watch.asciidoc +++ b/docs/java-rest/high-level/watcher/delete-watch.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[java-rest-high-x-pack-watcher-delete-watch]] === Delete watch API diff --git a/docs/java-rest/high-level/watcher/execute-watch.asciidoc b/docs/java-rest/high-level/watcher/execute-watch.asciidoc index 06a282228b4..b23b0918589 100644 --- a/docs/java-rest/high-level/watcher/execute-watch.asciidoc +++ b/docs/java-rest/high-level/watcher/execute-watch.asciidoc @@ -3,6 +3,7 @@ :request: ExecuteWatchRequest :response: ExecuteWatchResponse -- +[role="xpack"] [id="{upid}-{api}"] === Execute watch API diff --git a/docs/java-rest/high-level/watcher/get-watch.asciidoc b/docs/java-rest/high-level/watcher/get-watch.asciidoc index 3ba232aa669..540f64ca947 100644 --- a/docs/java-rest/high-level/watcher/get-watch.asciidoc +++ b/docs/java-rest/high-level/watcher/get-watch.asciidoc @@ -3,7 +3,7 @@ :request: GetWatchRequest :response: GetWatchResponse -- - +[role="xpack"] [id="{upid}-{api}"] === Get watch API diff --git a/docs/java-rest/high-level/watcher/put-watch.asciidoc b/docs/java-rest/high-level/watcher/put-watch.asciidoc index 494fc3d4088..f3ab52181f2 100644 --- a/docs/java-rest/high-level/watcher/put-watch.asciidoc +++ b/docs/java-rest/high-level/watcher/put-watch.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[java-rest-high-x-pack-watcher-put-watch]] === Put watch API diff 
--git a/docs/java-rest/high-level/watcher/start-watch-service.asciidoc b/docs/java-rest/high-level/watcher/start-watch-service.asciidoc index 01334941033..6557b7a46de 100644 --- a/docs/java-rest/high-level/watcher/start-watch-service.asciidoc +++ b/docs/java-rest/high-level/watcher/start-watch-service.asciidoc @@ -3,6 +3,7 @@ :request: StartWatchServiceRequest :response: StartWatchServiceResponse -- +[role="xpack"] [id="{upid}-{api}"] === Start watch service API diff --git a/docs/java-rest/high-level/watcher/stop-watch-service.asciidoc b/docs/java-rest/high-level/watcher/stop-watch-service.asciidoc index b59db5a34f8..9eeca6b2236 100644 --- a/docs/java-rest/high-level/watcher/stop-watch-service.asciidoc +++ b/docs/java-rest/high-level/watcher/stop-watch-service.asciidoc @@ -3,6 +3,7 @@ :request: StopWatchServiceRequest :response: StopWatchServiceResponse -- +[role="xpack"] [id="{upid}-{api}"] === Stop watch service API diff --git a/docs/java-rest/high-level/watcher/watcher-stats.asciidoc b/docs/java-rest/high-level/watcher/watcher-stats.asciidoc index ddc877c2c95..d0e1837c26c 100644 --- a/docs/java-rest/high-level/watcher/watcher-stats.asciidoc +++ b/docs/java-rest/high-level/watcher/watcher-stats.asciidoc @@ -3,6 +3,7 @@ :request: WatcherStatsRequest :response: WatcherStatsResponse -- +[role="xpack"] [id="{upid}-{api}"] === Get Watcher stats API diff --git a/docs/java-rest/index.asciidoc b/docs/java-rest/index.asciidoc index d06e312bce7..212d34f663d 100644 --- a/docs/java-rest/index.asciidoc +++ b/docs/java-rest/index.asciidoc @@ -8,3 +8,5 @@ include::overview.asciidoc[] include::low-level/index.asciidoc[] include::high-level/index.asciidoc[] + +include::redirects.asciidoc[] \ No newline at end of file diff --git a/docs/java-rest/low-level/usage.asciidoc b/docs/java-rest/low-level/usage.asciidoc index 06bd77c7710..9d55ff79ce2 100644 --- a/docs/java-rest/low-level/usage.asciidoc +++ b/docs/java-rest/low-level/usage.asciidoc @@ -224,7 +224,7 @@ Once the `RestClient` has been created, requests can be sent by calling either will block the calling thread and return the `Response` when the request is successful or throw an exception if it fails. `performRequestAsync` is asynchronous and accepts a `ResponseListener` argument that it calls with a -`Response` when the request is successful or with an `Exception` if it4 fails. +`Response` when the request is successful or with an `Exception` if it fails. This is synchronous: @@ -329,6 +329,28 @@ include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-async-examp <2> Handle the returned exception, due to communication error or a response with status code that indicates an error +==== Cancelling asynchronous requests + +The `performRequestAsync` method returns a `Cancellable` that exposes a single +public method called `cancel`. This method can be called to cancel the ongoing +request. Cancelling a request will abort the HTTP request through +the underlying HTTP client. On the server side, this does not automatically +translate to the execution of that request being cancelled, which needs to be +specifically implemented in the API itself. + +The use of the `Cancellable` instance is optional and you can safely ignore this +if you don't need it. A typical use case would be using this together with +frameworks like RxJava or Kotlin's `suspendCancellableCoroutine`. Cancelling +requests that are no longer needed is a good way to avoid putting unnecessary +load on Elasticsearch.
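Before the tagged snippet below, here is a minimal, self-contained sketch of the pattern. The endpoint, the listener bodies, and the `restClient` instance are illustrative assumptions only.

["source","java"]
--------------------------------------------------
// Classes come from org.elasticsearch.client: Request, Response,
// ResponseListener and Cancellable. `restClient` is an already built RestClient.
Request request = new Request("GET", "/_search"); // any endpoint; "/_search" is just an example
Cancellable cancellable = restClient.performRequestAsync(request,
    new ResponseListener() {
        @Override
        public void onSuccess(Response response) {
            // the response arrived before the request was cancelled
        }

        @Override
        public void onFailure(Exception exception) {
            // a cancelled request typically surfaces here as a CancellationException
        }
    });

// Later, once the result is no longer needed:
cancellable.cancel();
--------------------------------------------------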
+ +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-async-cancel] +-------------------------------------------------- +<1> Process the returned response, in case it was ready before the request got cancelled +<2> Handle the returned exception, which will most likely be a `CancellationException` as the request got cancelled + [[java-rest-low-usage-responses]] === Reading responses diff --git a/docs/java-rest/redirects.asciidoc b/docs/java-rest/redirects.asciidoc new file mode 100644 index 00000000000..a077102b405 --- /dev/null +++ b/docs/java-rest/redirects.asciidoc @@ -0,0 +1,49 @@ +["appendix",role="exclude",id="redirects"] += Deleted pages + +The following pages have moved or been deleted. + +[role="exclude",id="_data_frame_transform_apis"] +=== {transform-cap} APIs + +See <>. + +[role="exclude",id="java-rest-high-dataframe-get-data-frame-transform"] +=== Get {transform} API + +See <>. + +[role="exclude",id="java-rest-high-dataframe-get-data-frame-transform-stats"] +=== Get {transform} stats API + +See <>. + +[role="exclude",id="java-rest-high-dataframe-put-data-frame-transform"] +=== Put {transform} API + +See <>. + +[role="exclude",id="java-rest-high-dataframe-update-data-frame-transform"] +=== Update {transform} API + +See <>. + +[role="exclude",id="java-rest-high-dataframe-delete-data-frame-transform"] +=== Delete {transform} API + +See <>. + +[role="exclude",id="java-rest-high-dataframe-preview-data-frame-transform"] +=== Preview {transform} API + +See <>. + +[role="exclude",id="java-rest-high-dataframe-start-data-frame-transform"] +=== Start {transform} API + +See <>. + +[role="exclude",id="java-rest-high-dataframe-stop-data-frame-transform"] +=== Stop {transform} API + +See <>. diff --git a/docs/painless/painless-contexts/painless-bucket-script-agg-context.asciidoc b/docs/painless/painless-contexts/painless-bucket-script-agg-context.asciidoc index 5a530601694..b97e3057077 100644 --- a/docs/painless/painless-contexts/painless-bucket-script-agg-context.asciidoc +++ b/docs/painless/painless-contexts/painless-bucket-script-agg-context.asciidoc @@ -38,7 +38,7 @@ and adds the user-specified base_cost to the result: Note that the values are extracted from the `params` map. In context, the aggregation looks like this: -[source,js] +[source,console] -------------------------------------------------- GET /seats/_search { @@ -79,8 +79,8 @@ GET /seats/_search } } -------------------------------------------------- -// CONSOLE // TEST[setup:seats] + <1> The `buckets_path` points to two aggregations (`min_cost`, `max_cost`) and adds `min`/`max` variables to the `params` map <2> The user-specified `base_cost` is also added to the script's `params` map \ No newline at end of file diff --git a/docs/painless/painless-contexts/painless-bucket-selector-agg-context.asciidoc b/docs/painless/painless-contexts/painless-bucket-selector-agg-context.asciidoc index 69fbce1d082..13fe69cefae 100644 --- a/docs/painless/painless-contexts/painless-bucket-selector-agg-context.asciidoc +++ b/docs/painless/painless-contexts/painless-bucket-selector-agg-context.asciidoc @@ -39,7 +39,7 @@ params.max + params.base_cost > 10 Note that the values are extracted from the `params` map. The script is in the form of an expression that returns `true` or `false`. 
In context, the aggregation looks like this: -[source,js] +[source,console] -------------------------------------------------- GET /seats/_search { @@ -74,8 +74,8 @@ GET /seats/_search } } -------------------------------------------------- -// CONSOLE // TEST[setup:seats] + <1> The `buckets_path` points to the max aggregations (`max_cost`) and adds `max` variables to the `params` map <2> The user-specified `base_cost` is also added to the `params` map diff --git a/docs/painless/painless-contexts/painless-context-examples.asciidoc b/docs/painless/painless-contexts/painless-context-examples.asciidoc index e840f34f33a..a451b1e89ca 100644 --- a/docs/painless/painless-contexts/painless-context-examples.asciidoc +++ b/docs/painless/painless-contexts/painless-context-examples.asciidoc @@ -41,7 +41,7 @@ the request URL. . Create {ref}/mapping.html[mappings] for the sample data: + -[source,js] +[source,console] ---- PUT /seats { @@ -62,7 +62,6 @@ PUT /seats } ---- + -// CONSOLE . Run the <> example. This sets up a script ingest processor used on each document as the diff --git a/docs/painless/painless-contexts/painless-field-context.asciidoc b/docs/painless/painless-contexts/painless-field-context.asciidoc index 83f8eef3629..6fc1693cfa0 100644 --- a/docs/painless/painless-contexts/painless-field-context.asciidoc +++ b/docs/painless/painless-contexts/painless-field-context.asciidoc @@ -59,7 +59,7 @@ params['_source']['actors'].length; <1> Submit the following request: -[source,js] +[source,console] ---- GET seats/_search { @@ -80,5 +80,4 @@ GET seats/_search } } ---- -// CONSOLE // TEST[skip: requires setup from other pages] \ No newline at end of file diff --git a/docs/painless/painless-contexts/painless-filter-context.asciidoc b/docs/painless/painless-contexts/painless-filter-context.asciidoc index 4c25e1af2c7..a7871d89574 100644 --- a/docs/painless/painless-contexts/painless-filter-context.asciidoc +++ b/docs/painless/painless-contexts/painless-filter-context.asciidoc @@ -41,7 +41,7 @@ Defining cost as a script parameter enables the cost to be configured in the script query request. For example, the following request finds all available theatre seats for evening performances that are under $18. 
-[source,js] +[source,console] ---- GET seats/_search { @@ -61,5 +61,4 @@ GET seats/_search } } ---- -// CONSOLE // TEST[skip: requires setup from other pages] \ No newline at end of file diff --git a/docs/painless/painless-contexts/painless-ingest-processor-context.asciidoc b/docs/painless/painless-contexts/painless-ingest-processor-context.asciidoc index 546057ab1a0..1556890a7d4 100644 --- a/docs/painless/painless-contexts/painless-ingest-processor-context.asciidoc +++ b/docs/painless/painless-contexts/painless-ingest-processor-context.asciidoc @@ -178,7 +178,7 @@ ctx.datetime = dt.getLong(ChronoField.INSTANT_SECONDS)*1000L; <15> Submit the following request: -[source,js] +[source,console] ---- PUT /_ingest/pipeline/seats { @@ -192,4 +192,3 @@ PUT /_ingest/pipeline/seats ] } ---- -// CONSOLE \ No newline at end of file diff --git a/docs/painless/painless-contexts/painless-min-should-match-context.asciidoc b/docs/painless/painless-contexts/painless-min-should-match-context.asciidoc index cd476481381..ac09f12da01 100644 --- a/docs/painless/painless-contexts/painless-min-should-match-context.asciidoc +++ b/docs/painless/painless-contexts/painless-min-should-match-context.asciidoc @@ -52,7 +52,7 @@ Math.min(params['num_terms'], params['min_actors_to_see']) The following request finds seats to performances with at least two of the three specified actors. -[source,js] +[source,console] ---- GET seats/_search { @@ -71,6 +71,5 @@ GET seats/_search } } ---- -// CONSOLE // TEST[skip: requires setup from other pages] diff --git a/docs/painless/painless-contexts/painless-score-context.asciidoc b/docs/painless/painless-contexts/painless-score-context.asciidoc index 2bec9021c17..14a28a228a6 100644 --- a/docs/painless/painless-contexts/painless-score-context.asciidoc +++ b/docs/painless/painless-contexts/painless-score-context.asciidoc @@ -36,7 +36,7 @@ To run this example, first follow the steps in The following query finds all unsold seats, with lower 'row' values scored higher. 
-[source,js] +[source,console] -------------------------------------------------- GET /seats/_search { @@ -54,5 +54,4 @@ GET /seats/_search } } -------------------------------------------------- -// CONSOLE // TEST[setup:seats] \ No newline at end of file diff --git a/docs/painless/painless-contexts/painless-sort-context.asciidoc b/docs/painless/painless-contexts/painless-sort-context.asciidoc index cf57febcc70..ee5baa1b046 100644 --- a/docs/painless/painless-contexts/painless-sort-context.asciidoc +++ b/docs/painless/painless-contexts/painless-sort-context.asciidoc @@ -34,7 +34,7 @@ To run this example, first follow the steps in To sort results by the length of the `theatre` field, submit the following query: -[source,js] +[source,console] ---- GET /_search { @@ -57,5 +57,4 @@ GET /_search } ---- -// CONSOLE // TEST[setup:seats] \ No newline at end of file diff --git a/docs/painless/painless-contexts/painless-update-by-query-context.asciidoc b/docs/painless/painless-contexts/painless-update-by-query-context.asciidoc index ba42105f2e9..e4cafb7a134 100644 --- a/docs/painless/painless-contexts/painless-update-by-query-context.asciidoc +++ b/docs/painless/painless-contexts/painless-update-by-query-context.asciidoc @@ -61,7 +61,7 @@ To run this example, first follow the steps in The following query finds all seats in a specific section that have not been sold and lowers the price by 2: -[source,js] +[source,console] -------------------------------------------------- POST /seats/_update_by_query { @@ -91,5 +91,4 @@ POST /seats/_update_by_query } } -------------------------------------------------- -// CONSOLE // TEST[setup:seats] \ No newline at end of file diff --git a/docs/painless/painless-contexts/painless-update-context.asciidoc b/docs/painless/painless-contexts/painless-update-context.asciidoc index 6ed8c2f7c13..20a1c936b2e 100644 --- a/docs/painless/painless-contexts/painless-update-context.asciidoc +++ b/docs/painless/painless-contexts/painless-update-context.asciidoc @@ -62,7 +62,7 @@ To run this example, first follow the steps in The following query updates a document to be sold, and sets the cost to the actual price paid after discounts: -[source,js] +[source,console] -------------------------------------------------- POST /seats/_update/3 { @@ -75,5 +75,4 @@ POST /seats/_update/3 } } -------------------------------------------------- -// CONSOLE // TEST[setup:seats] \ No newline at end of file diff --git a/docs/painless/painless-contexts/painless-watcher-condition-context.asciidoc b/docs/painless/painless-contexts/painless-watcher-condition-context.asciidoc index d2ca9cc993c..01ca156be14 100644 --- a/docs/painless/painless-contexts/painless-watcher-condition-context.asciidoc +++ b/docs/painless/painless-contexts/painless-watcher-condition-context.asciidoc @@ -18,7 +18,7 @@ The standard <> is available. *Example* -[source,js] +[source,console] ---- POST _watcher/watch/_execute { @@ -65,7 +65,6 @@ POST _watcher/watch/_execute } } ---- -// CONSOLE // TEST[skip: requires setup from other pages] <1> The Java Stream API is used in the condition. This API allows manipulation of @@ -78,7 +77,7 @@ on the value of the seats sold for the plays in the data set. The script aggregates the total sold seats for each play and returns true if there is at least one play that has sold over $50,000.
-[source,js] +[source,console] ---- POST _watcher/watch/_execute { @@ -123,7 +122,6 @@ POST _watcher/watch/_execute } } ---- -// CONSOLE // TEST[skip: requires setup from other pages] This example uses a nearly identical condition as the previous example. The diff --git a/docs/painless/painless-contexts/painless-watcher-context-example.asciidoc b/docs/painless/painless-contexts/painless-watcher-context-example.asciidoc index 5996e2ddc98..db1394416b6 100644 --- a/docs/painless/painless-contexts/painless-watcher-context-example.asciidoc +++ b/docs/painless/painless-contexts/painless-watcher-context-example.asciidoc @@ -1,4 +1,4 @@ -[source,js] +[source,console] ---- POST _watcher/watch/_execute { @@ -99,12 +99,11 @@ POST _watcher/watch/_execute } } ---- -// CONSOLE // TEST[skip: requires setup from other pages] The following example shows the use of metadata and transforming dates into a readable format. -[source,js] +[source,console] ---- POST _watcher/watch/_execute { @@ -157,5 +156,4 @@ POST _watcher/watch/_execute } } ---- -// CONSOLE // TEST[skip: requires setup from other pages] \ No newline at end of file diff --git a/docs/painless/painless-contexts/painless-watcher-transform-context.asciidoc b/docs/painless/painless-contexts/painless-watcher-transform-context.asciidoc index 408d673f6ed..d96ab5f1521 100644 --- a/docs/painless/painless-contexts/painless-watcher-transform-context.asciidoc +++ b/docs/painless/painless-contexts/painless-watcher-transform-context.asciidoc @@ -18,7 +18,7 @@ The standard <> is available. *Example* -[source,js] +[source,console] ---- POST _watcher/watch/_execute { @@ -75,7 +75,6 @@ POST _watcher/watch/_execute } } ---- -// CONSOLE // TEST[skip: requires setup from other pages] <1> The Java Stream API is used in the transform. This API allows manipulation of @@ -88,7 +87,7 @@ the elements of the list in a pipeline. The following action transform changes each value in the mod_log action into a `String`. This transform does not change the values in the unmod_log action. -[source,js] +[source,console] ---- POST _watcher/watch/_execute { @@ -142,7 +141,6 @@ POST _watcher/watch/_execute } } ---- -// CONSOLE // TEST[skip: requires setup from other pages] This example uses the streaming API in a very similar manner. The differences below are diff --git a/docs/painless/painless-guide/painless-datetime.asciidoc b/docs/painless/painless-guide/painless-datetime.asciidoc index 97854d9ad9d..68d93f4e369 100644 --- a/docs/painless/painless-guide/painless-datetime.asciidoc +++ b/docs/painless/painless-guide/painless-datetime.asciidoc @@ -728,7 +728,7 @@ examples into an Elasticsearch cluster: . Create {ref}/mapping.html[mappings] for the sample data. + -[source,js] +[source,console] ---- PUT /messages { @@ -748,11 +748,9 @@ PUT /messages } ---- + -// CONSOLE -+ . Load the sample data. + -[source,js] +[source,console] ---- POST /_bulk { "index" : { "_index" : "messages", "_id" : "1" } } @@ -776,8 +774,6 @@ POST /_bulk { "index" : { "_index" : "messages", "_id" : "10" } } { "priority": 2, "datetime": "2019-07-23T23:39:54Z", "message": "m10" } ---- -+ -// CONSOLE // TEST[continued] ===== Day-of-the-Week Bucket Aggregation Example @@ -788,7 +784,7 @@ as part of the <> to display the number of messages from each day-of-the-week. 
-[source,js] +[source,console] ---- GET /messages/_search?pretty=true { @@ -801,7 +797,6 @@ GET /messages/_search?pretty=true } } ---- -// CONSOLE // TEST[continued] ===== Morning/Evening Bucket Aggregation Example @@ -812,7 +807,7 @@ as part of the <> to display the number of messages received in the morning versus the evening. -[source,js] +[source,console] ---- GET /messages/_search?pretty=true { @@ -825,7 +820,6 @@ GET /messages/_search?pretty=true } } ---- -// CONSOLE // TEST[continued] ===== Age of a Message Script Field Example @@ -835,7 +829,7 @@ The following example uses a <> to display the elapsed time between "now" and when a message was received. -[source,js] +[source,console] ---- GET /_search?pretty=true { @@ -854,7 +848,6 @@ GET /_search?pretty=true } } ---- -// CONSOLE // TEST[continued] The following shows the script broken into multiple lines: diff --git a/docs/painless/painless-guide/painless-debugging.asciidoc b/docs/painless/painless-guide/painless-debugging.asciidoc index fe56cb25f1e..ef1045b3ac8 100644 --- a/docs/painless/painless-guide/painless-debugging.asciidoc +++ b/docs/painless/painless-guide/painless-debugging.asciidoc @@ -16,7 +16,7 @@ utility method, `Debug.explain` which throws the exception for you. For example, you can use {ref}/search-explain.html[`_explain`] to explore the context available to a {ref}/query-dsl-script-query.html[script query]. -[source,js] +[source,console] --------------------------------------------------------- PUT /hockey/_doc/1?refresh {"first":"johnny","last":"gaudreau","goals":[9,27,1],"assists":[17,46,0],"gp":[26,82,1]} @@ -30,7 +30,6 @@ POST /hockey/_explain/1 } } --------------------------------------------------------- -// CONSOLE // TEST[s/_explain\/1/_explain\/1?error_trace=false/ catch:/painless_explain_error/] // The test system sends error_trace=true by default for easier debugging so // we have to override it to get a normal shaped response @@ -56,14 +55,13 @@ Which shows that the class of `doc.first` is You can use the same trick to see that `_source` is a `LinkedHashMap` in the `_update` API: -[source,js] +[source,console] --------------------------------------------------------- POST /hockey/_update/1 { "script": "Debug.explain(ctx._source)" } --------------------------------------------------------- -// CONSOLE // TEST[continued s/_update\/1/_update\/1?error_trace=false/ catch:/painless_explain_error/] The response looks like: diff --git a/docs/painless/painless-guide/painless-walkthrough.asciidoc b/docs/painless/painless-guide/painless-walkthrough.asciidoc index 70089a08726..d414418a908 100644 --- a/docs/painless/painless-guide/painless-walkthrough.asciidoc +++ b/docs/painless/painless-guide/painless-walkthrough.asciidoc @@ -3,7 +3,7 @@ To illustrate how Painless works, let's load some hockey stats into an Elasticsearch index: -[source,js] +[source,console] ---------------------------------------------------------------- PUT hockey/_bulk?refresh {"index":{"_id":1}} @@ -29,7 +29,6 @@ PUT hockey/_bulk?refresh {"index":{"_id":11}} {"first":"joe","last":"colborne","goals":[3,18,13],"assists":[6,20,24],"gp":[26,67,82],"born":"1990/01/30"} ---------------------------------------------------------------- -// CONSOLE // TESTSETUP [float] @@ -39,7 +38,7 @@ Document values can be accessed from a `Map` named `doc`. For example, the following script calculates a player's total goals. This example uses a strongly typed `int` and a `for` loop. 
-[source,js] +[source,console] ---------------------------------------------------------------- GET hockey/_search { @@ -61,11 +60,10 @@ GET hockey/_search } } ---------------------------------------------------------------- -// CONSOLE Alternatively, you could do the same thing using a script field instead of a function score: -[source,js] +[source,console] ---------------------------------------------------------------- GET hockey/_search { @@ -88,12 +86,11 @@ GET hockey/_search } } ---------------------------------------------------------------- -// CONSOLE The following example uses a Painless script to sort the players by their combined first and last names. The names are accessed using `doc['first'].value` and `doc['last'].value`. -[source,js] +[source,console] ---------------------------------------------------------------- GET hockey/_search { @@ -112,7 +109,6 @@ GET hockey/_search } } ---------------------------------------------------------------- -// CONSOLE [float] @@ -132,7 +128,7 @@ You can also easily update fields. You access the original source for a field as First, let's look at the source data for a player by submitting the following request: -[source,js] +[source,console] ---------------------------------------------------------------- GET hockey/_search { @@ -147,11 +143,10 @@ GET hockey/_search } } ---------------------------------------------------------------- -// CONSOLE To change player 1's last name to `hockey`, simply set `ctx._source.last` to the new value: -[source,js] +[source,console] ---------------------------------------------------------------- POST hockey/_update/1 { @@ -164,12 +159,11 @@ POST hockey/_update/1 } } ---------------------------------------------------------------- -// CONSOLE You can also add fields to a document. For example, this script adds a new field that contains the player's nickname, _hockey_. -[source,js] +[source,console] ---------------------------------------------------------------- POST hockey/_update/1 { @@ -186,7 +180,6 @@ POST hockey/_update/1 } } ---------------------------------------------------------------- -// CONSOLE [float] [[modules-scripting-painless-dates]] @@ -199,7 +192,7 @@ in a script, leave out the `get` prefix and continue with lowercasing the rest of the method name. For example, the following returns every hockey player's birth year: -[source,js] +[source,console] ---------------------------------------------------------------- GET hockey/_search { @@ -212,7 +205,6 @@ GET hockey/_search } } ---------------------------------------------------------------- -// CONSOLE [float] [[modules-scripting-painless-regex]] @@ -241,7 +233,7 @@ text matches, `false` otherwise. 
Using the find operator (`=~`) you can update all hockey players with "b" in their last name: -[source,js] +[source,console] ---------------------------------------------------------------- POST hockey/_update_by_query { @@ -257,12 +249,11 @@ POST hockey/_update_by_query } } ---------------------------------------------------------------- -// CONSOLE Using the match operator (`==~`) you can update all the hockey players whose names start with a consonant and end with a vowel: -[source,js] +[source,console] ---------------------------------------------------------------- POST hockey/_update_by_query { @@ -278,12 +269,11 @@ POST hockey/_update_by_query } } ---------------------------------------------------------------- -// CONSOLE You can use the `Pattern.matcher` directly to get a `Matcher` instance and remove all of the vowels in all of their last names: -[source,js] +[source,console] ---------------------------------------------------------------- POST hockey/_update_by_query { @@ -293,13 +283,12 @@ POST hockey/_update_by_query } } ---------------------------------------------------------------- -// CONSOLE `Matcher.replaceAll` is just a call to Java's `Matcher`'s http://docs.oracle.com/javase/8/docs/api/java/util/regex/Matcher.html#replaceAll-java.lang.String-[replaceAll] method so it supports `$1` and `\1` for replacements: -[source,js] +[source,console] ---------------------------------------------------------------- POST hockey/_update_by_query { @@ -309,7 +298,6 @@ POST hockey/_update_by_query } } ---------------------------------------------------------------- -// CONSOLE If you need more control over replacements you can call `replaceAll` on a `CharSequence` with a `Function` that builds the replacement. @@ -321,7 +309,7 @@ replacement is rude and will likely break the replacement process. This will make all of the vowels in the hockey player's last names upper case: -[source,js] +[source,console] ---------------------------------------------------------------- POST hockey/_update_by_query { @@ -334,12 +322,11 @@ POST hockey/_update_by_query } } ---------------------------------------------------------------- -// CONSOLE Or you can use the `CharSequence.replaceFirst` to make the first vowel in their last names upper case: -[source,js] +[source,console] ---------------------------------------------------------------- POST hockey/_update_by_query { @@ -352,8 +339,6 @@ POST hockey/_update_by_query } } ---------------------------------------------------------------- -// CONSOLE - Note: all of the `_update_by_query` examples above could really do with a `query` to limit the data that they pull back. While you *could* use a diff --git a/docs/plugins/analysis-icu.asciidoc b/docs/plugins/analysis-icu.asciidoc index a189bd1adad..7c87ddc3b4e 100644 --- a/docs/plugins/analysis-icu.asciidoc +++ b/docs/plugins/analysis-icu.asciidoc @@ -62,7 +62,7 @@ http://icu-project.org/apiref/icu4j/com/ibm/icu/text/UnicodeSet.html[UnicodeSet] Here are two examples, the default usage and a customised character filter: -[source,js] +[source,console] -------------------------------------------------- PUT icu_sample { @@ -95,7 +95,6 @@ PUT icu_sample } } -------------------------------------------------- -// CONSOLE <1> Uses the default `nfkc_cf` normalization. <2> Uses the customized `nfd_normalizer` token filter, which is set to use `nfc` normalization with decomposition. 
@@ -110,7 +109,7 @@ but adds better support for some Asian languages by using a dictionary-based approach to identify words in Thai, Lao, Chinese, Japanese, and Korean, and using custom rules to break Myanmar and Khmer text into syllables. -[source,js] +[source,console] -------------------------------------------------- PUT icu_sample { @@ -127,7 +126,6 @@ PUT icu_sample } } -------------------------------------------------- -// CONSOLE ===== Rules customization @@ -151,7 +149,7 @@ As a demonstration of how the rule files can be used, save the following user fi Then create an analyzer to use this rule file as follows: -[source,js] +[source,console] -------------------------------------------------- PUT icu_sample { @@ -181,7 +179,6 @@ GET icu_sample/_analyze "text": "Elasticsearch. Wow!" } -------------------------------------------------- -// CONSOLE The above `analyze` request returns the following: @@ -219,7 +216,7 @@ You should probably prefer the < Uses the default `nfkc_cf` normalization. <2> Uses the customized `nfc_normalizer` token filter, which is set to use `nfc` normalization. @@ -265,7 +261,7 @@ Case folding of Unicode characters based on `UTR#30`, like the on steroids. It registers itself as the `icu_folding` token filter and is available to all indices: -[source,js] +[source,console] -------------------------------------------------- PUT icu_sample { @@ -285,7 +281,6 @@ PUT icu_sample } } -------------------------------------------------- -// CONSOLE The ICU folding token filter already does Unicode normalization, so there is no need to use Normalize character or token filter as well. @@ -299,7 +294,7 @@ to note that both upper and lowercase forms should be specified, and that these filtered character are not lowercased which is why we add the `lowercase` filter as well: -[source,js] +[source,console] -------------------------------------------------- PUT icu_sample { @@ -326,7 +321,6 @@ PUT icu_sample } } -------------------------------------------------- -// CONSOLE [[analysis-icu-collation]] @@ -352,7 +346,7 @@ which is a best-effort attempt at language-neutral sorting. Below is an example of how to set up a field for sorting German names in ``phonebook'' order: -[source,js] +[source,console] -------------------------- PUT my_index { @@ -385,7 +379,6 @@ GET _search <3> } -------------------------- -// CONSOLE <1> The `name` field uses the `standard` analyzer, and so support full text queries. <2> The `name.sort` field is an `icu_collation_keyword` field that will preserve the name as @@ -507,7 +500,7 @@ rulesets are not yet supported. 
For example: -[source,js] +[source,console] -------------------------------------------------- PUT icu_sample { @@ -552,7 +545,6 @@ GET icu_sample/_analyze } -------------------------------------------------- -// CONSOLE <1> This transforms transliterates characters to Latin, and separates accents from their base characters, removes the accents, and then puts the diff --git a/docs/plugins/analysis-kuromoji.asciidoc b/docs/plugins/analysis-kuromoji.asciidoc index 45217310452..94c767f1054 100644 --- a/docs/plugins/analysis-kuromoji.asciidoc +++ b/docs/plugins/analysis-kuromoji.asciidoc @@ -103,7 +103,7 @@ dictionary to `$ES_HOME/config/userdict_ja.txt`: You can also inline the rules directly in the tokenizer definition using the `user_dictionary_rules` option: -[source,js] +[source,console] -------------------------------------------------- PUT nori_sample { @@ -128,7 +128,6 @@ PUT nori_sample } } -------------------------------------------------- -// CONSOLE -- `nbest_cost`/`nbest_examples`:: @@ -155,7 +154,7 @@ If both parameters are used, the largest number of both is applied. Then create an analyzer as follows: -[source,js] +[source,console] -------------------------------------------------- PUT kuromoji_sample { @@ -187,7 +186,6 @@ GET kuromoji_sample/_analyze "text": "東京スカイツリー" } -------------------------------------------------- -// CONSOLE The above `analyze` request returns the following: @@ -217,7 +215,7 @@ The above `analyze` request returns the following: The `kuromoji_baseform` token filter replaces terms with their BaseFormAttribute. This acts as a lemmatizer for verbs and adjectives. Example: -[source,js] +[source,console] -------------------------------------------------- PUT kuromoji_sample { @@ -243,7 +241,6 @@ GET kuromoji_sample/_analyze "text": "飲み" } -------------------------------------------------- -// CONSOLE which responds with: @@ -274,7 +271,7 @@ part-of-speech tags. It accepts the following setting: For example: -[source,js] +[source,console] -------------------------------------------------- PUT kuromoji_sample { @@ -309,7 +306,6 @@ GET kuromoji_sample/_analyze "text": "寿司がおいしいね" } -------------------------------------------------- -// CONSOLE Which responds with: @@ -348,7 +344,7 @@ to `true`. The default when defining a custom `kuromoji_readingform`, however, is `false`. The only reason to use the custom form is if you need the katakana reading form: -[source,js] +[source,console] -------------------------------------------------- PUT kuromoji_sample { @@ -392,7 +388,6 @@ GET kuromoji_sample/_analyze "text": "寿司" <2> } -------------------------------------------------- -// CONSOLE <1> Returns `スシ`. <2> Returns `sushi`. @@ -412,7 +407,7 @@ This token filter accepts the following setting: is `4`). -[source,js] +[source,console] -------------------------------------------------- PUT kuromoji_sample { @@ -450,7 +445,6 @@ GET kuromoji_sample/_analyze "text": "サーバー" <2> } -------------------------------------------------- -// CONSOLE <1> Returns `コピー`. <2> Return `サーバ`. @@ -465,7 +459,7 @@ the predefined `_japanese_` stopwords list. If you want to use a different predefined list, then use the {ref}/analysis-stop-tokenfilter.html[`stop` token filter] instead. 
-[source,js] +[source,console] -------------------------------------------------- PUT kuromoji_sample { @@ -500,7 +494,6 @@ GET kuromoji_sample/_analyze "text": "ストップは消える" } -------------------------------------------------- -// CONSOLE The above request returns: @@ -524,7 +517,7 @@ The above request returns: The `kuromoji_number` token filter normalizes Japanese numbers (kansūji) to regular Arabic decimal numbers in half-width characters. For example: -[source,js] +[source,console] -------------------------------------------------- PUT kuromoji_sample { @@ -550,7 +543,6 @@ GET kuromoji_sample/_analyze "text": "一〇〇〇" } -------------------------------------------------- -// CONSOLE Which results in: diff --git a/docs/plugins/analysis-nori.asciidoc b/docs/plugins/analysis-nori.asciidoc index 7ed7d4c857a..7cc04c9c3de 100644 --- a/docs/plugins/analysis-nori.asciidoc +++ b/docs/plugins/analysis-nori.asciidoc @@ -88,7 +88,7 @@ C샤프 Then create an analyzer as follows: -[source,js] +[source,console] -------------------------------------------------- PUT nori_sample { @@ -119,7 +119,6 @@ GET nori_sample/_analyze "text": "세종시" <1> } -------------------------------------------------- -// CONSOLE <1> Sejong city @@ -161,7 +160,7 @@ The above `analyze` request returns the following: You can also inline the rules directly in the tokenizer definition using the `user_dictionary_rules` option: -[source,js] +[source,console] -------------------------------------------------- PUT nori_sample { @@ -186,14 +185,13 @@ PUT nori_sample } } -------------------------------------------------- -// CONSOLE -- The `nori_tokenizer` sets a number of additional attributes per token that are used by token filters to modify the stream. You can view all these additional attributes with the following request: -[source,js] +[source,console] -------------------------------------------------- GET _analyze { @@ -203,7 +201,6 @@ GET _analyze "explain": true } -------------------------------------------------- -// CONSOLE <1> A tree with deep roots @@ -329,7 +326,7 @@ and defaults to: For example: -[source,js] +[source,console] -------------------------------------------------- PUT nori_sample { @@ -363,7 +360,6 @@ GET nori_sample/_analyze "text": "여섯 용이" <2> } -------------------------------------------------- -// CONSOLE <1> Korean numerals should be removed (`NR`) <2> Six dragons @@ -395,7 +391,7 @@ Which responds with: The `nori_readingform` token filter rewrites tokens written in Hanja to their Hangul form. -[source,js] +[source,console] -------------------------------------------------- PUT nori_sample { @@ -419,7 +415,6 @@ GET nori_sample/_analyze "text": "鄕歌" <1> } -------------------------------------------------- -// CONSOLE <1> A token written in Hanja: Hyangga diff --git a/docs/plugins/analysis-phonetic.asciidoc b/docs/plugins/analysis-phonetic.asciidoc index 3627751670a..6e81e8efdca 100644 --- a/docs/plugins/analysis-phonetic.asciidoc +++ b/docs/plugins/analysis-phonetic.asciidoc @@ -27,7 +27,7 @@ The `phonetic` token filter takes the following settings: token. Accepts `true` (default) and `false`. Not supported by `beider_morse` encoding. 
-[source,js] +[source,console] -------------------------------------------------- PUT phonetic_sample { @@ -61,7 +61,6 @@ GET phonetic_sample/_analyze "text": "Joe Bloggs" <1> } -------------------------------------------------- -// CONSOLE <1> Returns: `J`, `joe`, `BLKS`, `bloggs` diff --git a/docs/plugins/analysis-smartcn.asciidoc b/docs/plugins/analysis-smartcn.asciidoc index a67ac3edd57..1296a28abd6 100644 --- a/docs/plugins/analysis-smartcn.asciidoc +++ b/docs/plugins/analysis-smartcn.asciidoc @@ -27,7 +27,7 @@ NOTE: The `smartcn_word` token filter and `smartcn_sentence` have been deprecate The `smartcn` analyzer could be reimplemented as a `custom` analyzer that can then be extended and configured as follows: -[source,js] +[source,console] ---------------------------------------------------- PUT smartcn_example { @@ -46,7 +46,6 @@ PUT smartcn_example } } ---------------------------------------------------- -// CONSOLE // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: smartcn_example, first: smartcn, second: rebuilt_smartcn}\nendyaml\n/] [[analysis-smartcn_stop]] @@ -58,7 +57,7 @@ This filter only supports the predefined `_smartcn_` stopwords list. If you want to use a different predefined list, then use the {ref}/analysis-stop-tokenfilter.html[`stop` token filter] instead. -[source,js] +[source,console] -------------------------------------------------- PUT smartcn_example { @@ -95,7 +94,6 @@ GET smartcn_example/_analyze "text": "哈喽,我们是 Elastic 我们是 Elastic Stack(Elasticsearch、Kibana、Beats 和 Logstash)的开发公司。从股票行情到 Twitter 消息流,从 Apache 日志到 WordPress 博文,我们可以帮助人们体验搜索的强大力量,帮助他们以截然不同的方式探索和分析数据" } -------------------------------------------------- -// CONSOLE The above request returns: diff --git a/docs/plugins/analysis-stempel.asciidoc b/docs/plugins/analysis-stempel.asciidoc index ea37cf0228e..6afa88013c1 100644 --- a/docs/plugins/analysis-stempel.asciidoc +++ b/docs/plugins/analysis-stempel.asciidoc @@ -22,7 +22,7 @@ which are not configurable. The `polish` analyzer could be reimplemented as a `custom` analyzer that can then be extended and configured differently as follows: -[source,js] +[source,console] ---------------------------------------------------- PUT /stempel_example { @@ -42,7 +42,6 @@ PUT /stempel_example } } ---------------------------------------------------- -// CONSOLE // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: stempel_example, first: polish, second: rebuilt_stempel}\nendyaml\n/] [[analysis-polish-stop]] @@ -54,7 +53,7 @@ the predefined `_polish_` stopwords list. If you want to use a different predefined list, then use the {ref}/analysis-stop-tokenfilter.html[`stop` token filter] instead. -[source,js] +[source,console] -------------------------------------------------- PUT /polish_stop_example { @@ -90,7 +89,6 @@ GET polish_stop_example/_analyze "text": "Gdzie kucharek sześć, tam nie ma co jeść." 
} -------------------------------------------------- -// CONSOLE The above request returns: diff --git a/docs/plugins/discovery-azure-classic.asciidoc b/docs/plugins/discovery-azure-classic.asciidoc index 949426513c5..2f580fbd24d 100644 --- a/docs/plugins/discovery-azure-classic.asciidoc +++ b/docs/plugins/discovery-azure-classic.asciidoc @@ -355,11 +355,10 @@ sudo dpkg -i elasticsearch-{version}.deb Check that Elasticsearch is running: -[source,js] +[source,console] ---- GET / ---- -// CONSOLE This command should give you a JSON result: diff --git a/docs/plugins/ingest-attachment.asciidoc b/docs/plugins/ingest-attachment.asciidoc index d0c2eba0f65..e2498a3b873 100644 --- a/docs/plugins/ingest-attachment.asciidoc +++ b/docs/plugins/ingest-attachment.asciidoc @@ -32,7 +32,7 @@ include::install_remove.asciidoc[] For example, this: -[source,js] +[source,console] -------------------------------------------------- PUT _ingest/pipeline/attachment { @@ -51,7 +51,6 @@ PUT my_index/_doc/my_id?pipeline=attachment } GET my_index/_doc/my_id -------------------------------------------------- -// CONSOLE Returns this: @@ -81,7 +80,7 @@ Returns this: To specify only some fields to be extracted: -[source,js] +[source,console] -------------------------------------------------- PUT _ingest/pipeline/attachment { @@ -96,7 +95,6 @@ PUT _ingest/pipeline/attachment ] } -------------------------------------------------- -// CONSOLE NOTE: Extracting contents from binary data is a resource intensive operation and consumes a lot of resources. It is highly recommended to run pipelines @@ -115,7 +113,7 @@ setting. For example: -[source,js] +[source,console] -------------------------------------------------- PUT _ingest/pipeline/attachment { @@ -136,7 +134,6 @@ PUT my_index/_doc/my_id?pipeline=attachment } GET my_index/_doc/my_id -------------------------------------------------- -// CONSOLE Returns this: @@ -164,7 +161,7 @@ Returns this: // TESTRESPONSE[s/"_seq_no": \d+/"_seq_no" : $body._seq_no/ s/"_primary_term" : 1/"_primary_term" : $body._primary_term/] -[source,js] +[source,console] -------------------------------------------------- PUT _ingest/pipeline/attachment { @@ -186,7 +183,6 @@ PUT my_index/_doc/my_id_2?pipeline=attachment } GET my_index/_doc/my_id_2 -------------------------------------------------- -// CONSOLE Returns this: @@ -247,7 +243,7 @@ of the attachments field and insert the properties into the document so the following `foreach` processor is used: -[source,js] +[source,console] -------------------------------------------------- PUT _ingest/pipeline/attachment { @@ -281,7 +277,6 @@ PUT my_index/_doc/my_id?pipeline=attachment } GET my_index/_doc/my_id -------------------------------------------------- -// CONSOLE Returns this: diff --git a/docs/plugins/mapper-annotated-text.asciidoc b/docs/plugins/mapper-annotated-text.asciidoc index a148cec76ba..e6d1628b717 100644 --- a/docs/plugins/mapper-annotated-text.asciidoc +++ b/docs/plugins/mapper-annotated-text.asciidoc @@ -22,7 +22,7 @@ The `annotated-text` tokenizes text content as per the more common `text` field "limitations" below) but also injects any marked-up annotation tokens directly into the search index: -[source,js] +[source,console] -------------------------- PUT my_index { @@ -35,7 +35,6 @@ PUT my_index } } -------------------------- -// CONSOLE Such a mapping would allow marked-up text eg wikipedia articles to be indexed as both text and structured tokens. 
The annotations use a markdown-like syntax using URL encoding of @@ -110,7 +109,7 @@ We can now perform searches for annotations using regular `term` queries that do the provided search values. Annotations are a more precise way of matching as can be seen in this example where a search for `Beck` will not match `Jeff Beck` : -[source,js] +[source,console] -------------------------- # Example documents PUT my_index/_doc/1 @@ -133,7 +132,6 @@ GET my_index/_search } } -------------------------- -// CONSOLE <1> As well as tokenising the plain text into single words e.g. `beck`, here we inject the single token value `Beck` at the same position as `beck` in the token stream. @@ -164,7 +162,7 @@ entity IDs woven into text. These IDs can be embedded as annotations in an annotated_text field but it often makes sense to include them in dedicated structured fields to support discovery via aggregations: -[source,js] +[source,console] -------------------------- PUT my_index { @@ -185,11 +183,10 @@ PUT my_index } } -------------------------- -// CONSOLE Applications would then typically provide content and discover it as follows: -[source,js] +[source,console] -------------------------- # Example documents PUT my_index/_doc/1 @@ -215,7 +212,6 @@ GET my_index/_search } } -------------------------- -// CONSOLE <1> Note the `my_twitter_handles` contains a list of the annotation values also used in the unstructured text. (Note the annotated_text syntax requires escaping). @@ -265,7 +261,7 @@ they don't name clash with text tokens e.g. The `annotated-text` plugin includes a custom highlighter designed to mark up search hits in a way which is respectful of the original markup: -[source,js] +[source,console] -------------------------- # Example documents PUT my_index/_doc/1 @@ -290,7 +286,7 @@ GET my_index/_search } } -------------------------- -// CONSOLE + <1> The `annotated` highlighter type is designed for use with annotated_text fields The annotated highlighter is based on the `unified` highlighter and supports the same diff --git a/docs/plugins/mapper-murmur3.asciidoc b/docs/plugins/mapper-murmur3.asciidoc index 7d8a5c74102..f9a42d505b8 100644 --- a/docs/plugins/mapper-murmur3.asciidoc +++ b/docs/plugins/mapper-murmur3.asciidoc @@ -14,7 +14,7 @@ include::install_remove.asciidoc[] The `murmur3` is typically used within a multi-field, so that both the original value and its hash are stored in the index: -[source,js] +[source,console] -------------------------- PUT my_index { @@ -32,13 +32,12 @@ PUT my_index } } -------------------------- -// CONSOLE Such a mapping would allow to refer to `my_field.hash` in order to get hashes of the values of the `my_field` field. 
This is only useful in order to run `cardinality` aggregations: -[source,js] +[source,console] -------------------------- # Example documents PUT my_index/_doc/1 @@ -62,7 +61,6 @@ GET my_index/_search } } -------------------------- -// CONSOLE <1> Counting unique values on the `my_field.hash` field diff --git a/docs/plugins/mapper-size.asciidoc b/docs/plugins/mapper-size.asciidoc index d4bb3c144a7..5253205d7a9 100644 --- a/docs/plugins/mapper-size.asciidoc +++ b/docs/plugins/mapper-size.asciidoc @@ -13,7 +13,7 @@ include::install_remove.asciidoc[] In order to enable the `_size` field, set the mapping as follows: -[source,js] +[source,console] -------------------------- PUT my_index { @@ -24,12 +24,11 @@ PUT my_index } } -------------------------- -// CONSOLE The value of the `_size` field is accessible in queries, aggregations, scripts, and when sorting: -[source,js] +[source,console] -------------------------- # Example documents PUT my_index/_doc/1 @@ -73,7 +72,6 @@ GET my_index/_search } } -------------------------- -// CONSOLE // TEST[continued] <1> Querying on the `_size` field diff --git a/docs/plugins/repository-azure.asciidoc b/docs/plugins/repository-azure.asciidoc index a16b7c28c6a..4700f8bb1d0 100644 --- a/docs/plugins/repository-azure.asciidoc +++ b/docs/plugins/repository-azure.asciidoc @@ -183,7 +183,7 @@ include::repository-shared-settings.asciidoc[] Some examples, using scripts: -[source,js] +[source,console] ---- # The simplest one PUT _snapshot/my_backup1 @@ -221,7 +221,6 @@ PUT _snapshot/my_backup4 } } ---- -// CONSOLE // TEST[skip:we don't have azure setup while testing this] Example using Java: diff --git a/docs/plugins/repository-gcs.asciidoc b/docs/plugins/repository-gcs.asciidoc index 1206f372aab..4561fde8406 100644 --- a/docs/plugins/repository-gcs.asciidoc +++ b/docs/plugins/repository-gcs.asciidoc @@ -64,7 +64,7 @@ Here is a summary of the steps: 1. Connect to the https://console.cloud.google.com/[Google Cloud Platform Console]. 2. Select your project. -3. Got to the https://console.cloud.google.com/permissions[Permission] tab. +3. Go to the https://console.cloud.google.com/permissions[Permission] tab. 4. Select the https://console.cloud.google.com/permissions/serviceaccounts[Service Accounts] tab. 5. Click *Create service account*. 6. After the account is created, select it and download a JSON key file. @@ -101,7 +101,7 @@ For example, if you added a `gcs.client.my_alternate_client.credentials_file` setting in the keystore, you can configure a repository to use those credentials like this: -[source,js] +[source,console] ---- PUT _snapshot/my_gcs_repository { @@ -112,7 +112,6 @@ PUT _snapshot/my_gcs_repository } } ---- -// CONSOLE // TEST[skip:we don't have gcs setup while testing this] The `credentials_file` settings are {ref}/secure-settings.html#reloadable-secure-settings[reloadable]. @@ -133,7 +132,7 @@ called `default`, but can be customized with the repository setting `client`. For example: -[source,js] +[source,console] ---- PUT _snapshot/my_gcs_repository { @@ -144,7 +143,6 @@ PUT _snapshot/my_gcs_repository } } ---- -// CONSOLE // TEST[skip:we don't have gcs setup while testing this] Some settings are sensitive and must be stored in the @@ -199,7 +197,7 @@ is stored in Google Cloud Storage. These can be specified when creating the repository. 
For example: -[source,js] +[source,console] ---- PUT _snapshot/my_gcs_repository { @@ -210,7 +208,6 @@ PUT _snapshot/my_gcs_repository } } ---- -// CONSOLE // TEST[skip:we don't have gcs set up while testing this] The following settings are supported: diff --git a/docs/plugins/repository-hdfs.asciidoc b/docs/plugins/repository-hdfs.asciidoc index d903d90570b..ba51c4d5c0c 100644 --- a/docs/plugins/repository-hdfs.asciidoc +++ b/docs/plugins/repository-hdfs.asciidoc @@ -25,7 +25,7 @@ plugin folder and point `HADOOP_HOME` variable to it; this should minimize the a Once installed, define the configuration for the `hdfs` repository through the {ref}/modules-snapshots.html[REST API]: -[source,js] +[source,console] ---- PUT _snapshot/my_hdfs_repository { @@ -37,7 +37,6 @@ PUT _snapshot/my_hdfs_repository } } ---- -// CONSOLE // TEST[skip:we don't have hdfs set up while testing this] The following settings are supported: @@ -144,7 +143,7 @@ Once your keytab files are in place and your cluster is started, creating a secu add the name of the principal that you will be authenticating as in the repository settings under the `security.principal` option: -[source,js] +[source,console] ---- PUT _snapshot/my_hdfs_repository { @@ -156,13 +155,12 @@ PUT _snapshot/my_hdfs_repository } } ---- -// CONSOLE // TEST[skip:we don't have hdfs set up while testing this] If you are using different service principals for each node, you can use the `_HOST` pattern in your principal name. Elasticsearch will automatically replace the pattern with the hostname of the node at runtime: -[source,js] +[source,console] ---- PUT _snapshot/my_hdfs_repository { @@ -174,7 +172,6 @@ PUT _snapshot/my_hdfs_repository } } ---- -// CONSOLE // TEST[skip:we don't have hdfs set up while testing this] [[repository-hdfs-security-authorization]] diff --git a/docs/plugins/repository-s3.asciidoc b/docs/plugins/repository-s3.asciidoc index 1070fa0d655..6a2a90f1ce7 100644 --- a/docs/plugins/repository-s3.asciidoc +++ b/docs/plugins/repository-s3.asciidoc @@ -21,7 +21,7 @@ http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html IAM Role] credentials for authentication. The only mandatory setting is the bucket name: -[source,js] +[source,console] ---- PUT _snapshot/my_s3_repository { @@ -31,7 +31,6 @@ PUT _snapshot/my_s3_repository } } ---- -// CONSOLE // TEST[skip:we don't have s3 setup while testing this] @@ -43,7 +42,7 @@ The settings have the form `s3.client.CLIENT_NAME.SETTING_NAME`. By default, `s3` repositories use a client named `default`, but this can be modified using the <> `client`. For example: -[source,js] +[source,console] ---- PUT _snapshot/my_s3_repository { @@ -54,7 +53,6 @@ PUT _snapshot/my_s3_repository } } ---- -// CONSOLE // TEST[skip:we don't have S3 setup while testing this] Most client settings can be added to the `elasticsearch.yml` configuration file @@ -210,7 +208,7 @@ or supported. The `s3` repository type supports a number of settings to customize how data is stored in S3. These can be specified when creating the repository. For example: -[source,js] +[source,console] ---- PUT _snapshot/my_s3_repository { @@ -221,7 +219,6 @@ PUT _snapshot/my_s3_repository } } ---- -// CONSOLE // TEST[skip:we don't have S3 set up while testing this] The following settings are supported: @@ -287,7 +284,7 @@ include::repository-shared-settings.asciidoc[] `storage_class`:: Sets the S3 storage class for objects stored in the snapshot repository. 
- Values may be `standard`, `reduced_redundancy`, `standard_ia` + Values may be `standard`, `reduced_redundancy`, `standard_ia`, `onezone_ia` and `intelligent_tiering`. Defaults to `standard`. Changing this setting on an existing repository only affects the storage class for newly created objects, resulting in a mixed usage of @@ -310,7 +307,7 @@ by the repository settings taking precedence over client settings. For example: -[source,js] +[source,console] ---- PUT _snapshot/my_s3_repository { @@ -322,7 +319,6 @@ PUT _snapshot/my_s3_repository } } ---- -// CONSOLE // TEST[skip:we don't have s3 set up while testing this] This sets up a repository that uses all client settings from the client diff --git a/docs/plugins/store-smb.asciidoc b/docs/plugins/store-smb.asciidoc index 4f713568655..6175d38eadf 100644 --- a/docs/plugins/store-smb.asciidoc +++ b/docs/plugins/store-smb.asciidoc @@ -44,7 +44,7 @@ Note that setting will be applied for newly created indices. It can also be set on a per-index basis at index creation time: -[source,js] +[source,console] ---- PUT my_index { @@ -53,4 +53,3 @@ PUT my_index } } ---- -// CONSOLE diff --git a/docs/reference/administering.asciidoc b/docs/reference/administering.asciidoc deleted file mode 100644 index 0a3901cf7ed..00000000000 --- a/docs/reference/administering.asciidoc +++ /dev/null @@ -1,12 +0,0 @@ -[[administer-elasticsearch]] -= Administering {es} - -[partintro] --- -Elasticsearch is a complex piece of software, with many moving parts. There are -many APIs and features that are designed to help you manage your Elasticsearch -cluster. - --- - -include::administering/backup-cluster.asciidoc[] \ No newline at end of file diff --git a/docs/reference/aggregations/bucket/datehistogram-aggregation.asciidoc b/docs/reference/aggregations/bucket/datehistogram-aggregation.asciidoc index 70ff27b184b..2d47ba9a59b 100644 --- a/docs/reference/aggregations/bucket/datehistogram-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/datehistogram-aggregation.asciidoc @@ -58,7 +58,7 @@ seconds of the following minute in the specified timezone, compensating for any intervening leap seconds, so that the number of minutes and seconds past the hour is the same at the start and end. -hours (`h`, `1h`) :: +hour (`h`, `1h`) :: All hours begin at 00 minutes and 00 seconds. One hour (1h) is the interval between 00:00 minutes of the first hour and 00:00 @@ -67,7 +67,7 @@ intervening leap seconds, so that the number of minutes and seconds past the hou is the same at the start and end. -days (`d`, `1d`) :: +day (`d`, `1d`) :: All days begin at the earliest possible time, which is usually 00:00:00 (midnight). diff --git a/docs/reference/aggregations/bucket/nested-aggregation.asciidoc b/docs/reference/aggregations/bucket/nested-aggregation.asciidoc index caa735e138c..9dba67ee15d 100644 --- a/docs/reference/aggregations/bucket/nested-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/nested-aggregation.asciidoc @@ -8,14 +8,14 @@ price for the product. The mapping could look like: [source,console] -------------------------------------------------- -PUT /index +PUT /products { "mappings": { "properties" : { "resellers" : { <1> "type" : "nested", "properties" : { - "name" : { "type" : "text" }, + "reseller" : { "type" : "text" }, "price" : { "type" : "double" } } } @@ -23,14 +23,37 @@ PUT /index } } -------------------------------------------------- -// TESTSETUP -<1> The `resellers` is an array that holds nested documents under the `product` object. 
+<1> `resellers` is an array that holds nested documents. -The following aggregations will return the minimum price products can be purchased in: +The following request adds a product with two resellers: [source,console] -------------------------------------------------- -GET /_search +PUT /products/_doc/0 +{ + "name": "LED TV", <1> + "resellers": [ + { + "reseller": "companyA", + "price": 350 + }, + { + "reseller": "companyB", + "price": 500 + } + ] +} +-------------------------------------------------- +// TEST[s/PUT \/products\/_doc\/0/PUT \/products\/_doc\/0\?refresh/] +// TEST[continued] +<1> We are using a dynamic mapping for the `name` attribute. + + +The following request returns the minimum price a product can be purchased for: + +[source,console] +-------------------------------------------------- +GET /products/_search { "query" : { "match" : { "name" : "led tv" } @@ -47,8 +70,8 @@ GET /_search } } -------------------------------------------------- -// TEST[s/GET \/_search/GET \/_search\?filter_path=aggregations/] -// TEST[s/^/PUT index\/_doc\/0\?refresh\n{"name":"led", "resellers": [{"name": "foo", "price": 350.00}, {"name": "bar", "price": 500.00}]}\n/] +// TEST[s/GET \/products\/_search/GET \/products\/_search\?filter_path=aggregations/] +// TEST[continued] As you can see above, the nested aggregation requires the `path` of the nested documents within the top level documents. Then one can define any type of aggregation over these nested documents. @@ -61,7 +84,7 @@ Response: ... "aggregations": { "resellers": { - "doc_count": 0, + "doc_count": 2, "min_price": { "value": 350 } diff --git a/docs/reference/aggregations/bucket/significantterms-aggregation.asciidoc b/docs/reference/aggregations/bucket/significantterms-aggregation.asciidoc index 3c9eb2ebe2f..30d5dbb23d2 100644 --- a/docs/reference/aggregations/bucket/significantterms-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/significantterms-aggregation.asciidoc @@ -17,7 +17,7 @@ that is significant and probably very relevant to their search. 5/10,000,000 vs ////////////////////////// -[source,js] +[source,console] -------------------------------------------------- PUT /reports { @@ -52,7 +52,6 @@ POST /reports/_bulk?refresh {"force": "Metropolitan Police Service", "crime_type": "Robbery"} ------------------------------------------------- -// NOTCONSOLE // TESTSETUP ////////////////////////// @@ -82,7 +81,7 @@ GET /_search Response: -[source,console] +[source,console-result] -------------------------------------------------- { ... diff --git a/docs/reference/aggregations/pipeline/movavg-aggregation.asciidoc b/docs/reference/aggregations/pipeline/movavg-aggregation.asciidoc index 3c5d5d0cff7..b91d14e513e 100644 --- a/docs/reference/aggregations/pipeline/movavg-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/movavg-aggregation.asciidoc @@ -51,7 +51,7 @@ A `moving_avg` aggregation looks like this in isolation: `moving_avg` aggregations must be embedded inside of a `histogram` or `date_histogram` aggregation. They can be embedded like any other metric aggregation: -[source,js] +[source,console] -------------------------------------------------- POST /_search { @@ -74,7 +74,6 @@ POST /_search } } -------------------------------------------------- -// CONSOLE // TEST[setup:sales] // TEST[warning:The moving_avg aggregation has been deprecated in favor of the moving_fn aggregation.] 
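The deprecation warnings in the test directives above point to the `moving_fn` aggregation as the replacement for `moving_avg`. A minimal sketch of an equivalent unweighted moving average using `moving_fn` (the field names, interval and window size are assumptions, not taken from the snippets above):

[source,console]
--------------------------------------------------
POST /_search?size=0
{
  "aggs": {
    "my_date_histo": {
      "date_histogram": {
        "field": "date",
        "calendar_interval": "1M"
      },
      "aggs": {
        "the_sum": {
          "sum": { "field": "price" }
        },
        "the_moving_avg": {
          "moving_fn": {
            "buckets_path": "the_sum",
            "window": 10,
            "script": "MovingFunctions.unweightedAvg(values)"
          }
        }
      }
    }
  }
}
--------------------------------------------------

`MovingFunctions.unweightedAvg` mirrors the behaviour of the `simple` model described below; the other models have corresponding `MovingFunctions` helpers.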
@@ -154,7 +153,7 @@ The `simple` model calculates the sum of all values in the window, then divides a simple arithmetic mean of the window. The simple model does not perform any time-dependent weighting, which means the values from a `simple` moving average tend to "lag" behind the real data. -[source,js] +[source,console] -------------------------------------------------- POST /_search { @@ -181,7 +180,6 @@ POST /_search } } -------------------------------------------------- -// CONSOLE // TEST[setup:sales] // TEST[warning:The moving_avg aggregation has been deprecated in favor of the moving_fn aggregation.] @@ -208,7 +206,7 @@ The `linear` model assigns a linear weighting to points in the series, such that the beginning of the window) contribute a linearly less amount to the total average. The linear weighting helps reduce the "lag" behind the data's mean, since older points have less influence. -[source,js] +[source,console] -------------------------------------------------- POST /_search { @@ -235,7 +233,6 @@ POST /_search } } -------------------------------------------------- -// CONSOLE // TEST[setup:sales] // TEST[warning:The moving_avg aggregation has been deprecated in favor of the moving_fn aggregation.] @@ -268,7 +265,7 @@ The default value of `alpha` is `0.3`, and the setting accepts any float from 0- The EWMA model can be <> -[source,js] +[source,console] -------------------------------------------------- POST /_search { @@ -298,7 +295,6 @@ POST /_search } } -------------------------------------------------- -// CONSOLE // TEST[setup:sales] // TEST[warning:The moving_avg aggregation has been deprecated in favor of the moving_fn aggregation.] @@ -327,7 +323,7 @@ The default value of `alpha` is `0.3` and `beta` is `0.1`. The settings accept a The Holt-Linear model can be <> -[source,js] +[source,console] -------------------------------------------------- POST /_search { @@ -358,7 +354,6 @@ POST /_search } } -------------------------------------------------- -// CONSOLE // TEST[setup:sales] // TEST[warning:The moving_avg aggregation has been deprecated in favor of the moving_fn aggregation.] @@ -416,7 +411,7 @@ The default value of `period` is `1`. The additive Holt-Winters model can be <> -[source,js] +[source,console] -------------------------------------------------- POST /_search { @@ -450,7 +445,6 @@ POST /_search } } -------------------------------------------------- -// CONSOLE // TEST[setup:sales] // TEST[warning:The moving_avg aggregation has been deprecated in favor of the moving_fn aggregation.] @@ -477,7 +471,7 @@ the result, but only minimally. If your data is non-zero, or you prefer to see you can disable this behavior with `pad: false` ====== -[source,js] +[source,console] -------------------------------------------------- POST /_search { @@ -512,7 +506,6 @@ POST /_search } } -------------------------------------------------- -// CONSOLE // TEST[setup:sales] // TEST[warning:The moving_avg aggregation has been deprecated in favor of the moving_fn aggregation.] @@ -527,7 +520,7 @@ Predictions are enabled by adding a `predict` parameter to any moving average ag predictions you would like appended to the end of the series. 
These predictions will be spaced out at the same interval as your buckets: -[source,js] +[source,console] -------------------------------------------------- POST /_search { @@ -555,7 +548,6 @@ POST /_search } } -------------------------------------------------- -// CONSOLE // TEST[setup:sales] // TEST[warning:The moving_avg aggregation has been deprecated in favor of the moving_fn aggregation.] @@ -606,7 +598,7 @@ models. Minimization is enabled/disabled via the `minimize` parameter: -[source,js] +[source,console] -------------------------------------------------- POST /_search { @@ -637,7 +629,6 @@ POST /_search } } -------------------------------------------------- -// CONSOLE // TEST[setup:sales] // TEST[warning:The moving_avg aggregation has been deprecated in favor of the moving_fn aggregation.] diff --git a/docs/reference/analysis.asciidoc b/docs/reference/analysis.asciidoc index e3b6aa46dd8..8370f92bb22 100644 --- a/docs/reference/analysis.asciidoc +++ b/docs/reference/analysis.asciidoc @@ -38,7 +38,7 @@ to the inverted index: Each <> field in a mapping can specify its own <>: -[source,js] +[source,console] ------------------------- PUT my_index { @@ -52,7 +52,6 @@ PUT my_index } } ------------------------- -// CONSOLE At index time, if no `analyzer` has been specified, it looks for an analyzer in the index settings called `default`. Failing that, it defaults to using diff --git a/docs/reference/analysis/analyzers/configuring.asciidoc b/docs/reference/analysis/analyzers/configuring.asciidoc index 21af2931310..0fc8d5b7159 100644 --- a/docs/reference/analysis/analyzers/configuring.asciidoc +++ b/docs/reference/analysis/analyzers/configuring.asciidoc @@ -6,7 +6,7 @@ of them, however, support configuration options to alter their behaviour. For instance, the <> can be configured to support a list of stop words: -[source,js] +[source,console] -------------------------------- PUT my_index { @@ -49,7 +49,6 @@ POST my_index/_analyze } -------------------------------- -// CONSOLE <1> We define the `std_english` analyzer to be based on the `standard` analyzer, but configured to remove the pre-defined list of English stopwords. diff --git a/docs/reference/analysis/analyzers/custom-analyzer.asciidoc b/docs/reference/analysis/analyzers/custom-analyzer.asciidoc index cc938b0cb13..2019242e8a8 100644 --- a/docs/reference/analysis/analyzers/custom-analyzer.asciidoc +++ b/docs/reference/analysis/analyzers/custom-analyzer.asciidoc @@ -51,7 +51,7 @@ Token Filters:: * <> * <> -[source,js] +[source,console] -------------------------------- PUT my_index { @@ -80,7 +80,6 @@ POST my_index/_analyze "text": "Is this déjà vu?" } -------------------------------- -// CONSOLE <1> Setting `type` to `custom` tells Elasticsearch that we are defining a custom analyzer. Compare this to how <>: @@ -154,7 +153,7 @@ Token Filters:: Here is an example: -[source,js] +[source,console] -------------------------------------------------- PUT my_index { @@ -204,7 +203,6 @@ POST my_index/_analyze "text": "I'm a :) person, and you?" } -------------------------------------------------- -// CONSOLE <1> Assigns the index a default custom analyzer, `my_custom_analyzer`. 
This analyzer uses a custom tokenizer, character filter, and token filter that diff --git a/docs/reference/analysis/analyzers/fingerprint-analyzer.asciidoc b/docs/reference/analysis/analyzers/fingerprint-analyzer.asciidoc index 8736b505443..a2a6932edad 100644 --- a/docs/reference/analysis/analyzers/fingerprint-analyzer.asciidoc +++ b/docs/reference/analysis/analyzers/fingerprint-analyzer.asciidoc @@ -12,7 +12,7 @@ configured, stop words will also be removed. [float] === Example output -[source,js] +[source,console] --------------------------- POST _analyze { @@ -20,7 +20,6 @@ POST _analyze "text": "Yes yes, Gödel said this sentence is consistent and." } --------------------------- -// CONSOLE ///////////////////// @@ -83,7 +82,7 @@ about stop word configuration. In this example, we configure the `fingerprint` analyzer to use the pre-defined list of English stop words: -[source,js] +[source,console] ---------------------------- PUT my_index { @@ -105,7 +104,6 @@ POST my_index/_analyze "text": "Yes yes, Gödel said this sentence is consistent and." } ---------------------------- -// CONSOLE ///////////////////// @@ -154,7 +152,7 @@ it, usually by adding token filters. This would recreate the built-in `fingerprint` analyzer and you can use it as a starting point for further customization: -[source,js] +[source,console] ---------------------------------------------------- PUT /fingerprint_example { @@ -174,5 +172,4 @@ PUT /fingerprint_example } } ---------------------------------------------------- -// CONSOLE // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: fingerprint_example, first: fingerprint, second: rebuilt_fingerprint}\nendyaml\n/] diff --git a/docs/reference/analysis/analyzers/keyword-analyzer.asciidoc b/docs/reference/analysis/analyzers/keyword-analyzer.asciidoc index e3f53381225..8b958e04f0d 100644 --- a/docs/reference/analysis/analyzers/keyword-analyzer.asciidoc +++ b/docs/reference/analysis/analyzers/keyword-analyzer.asciidoc @@ -7,7 +7,7 @@ string as a single token. [float] === Example output -[source,js] +[source,console] --------------------------- POST _analyze { @@ -15,7 +15,6 @@ POST _analyze "text": "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone." } --------------------------- -// CONSOLE ///////////////////// @@ -65,7 +64,7 @@ into tokens, but just in case you need it, this would recreate the built-in `keyword` analyzer and you can use it as a starting point for further customization: -[source,js] +[source,console] ---------------------------------------------------- PUT /keyword_example { @@ -82,6 +81,6 @@ PUT /keyword_example } } ---------------------------------------------------- -// CONSOLE // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: keyword_example, first: keyword, second: rebuilt_keyword}\nendyaml\n/] + <1> You'd add any token filters here. diff --git a/docs/reference/analysis/analyzers/lang-analyzer.asciidoc b/docs/reference/analysis/analyzers/lang-analyzer.asciidoc index 099950ca395..3c1047d81ef 100644 --- a/docs/reference/analysis/analyzers/lang-analyzer.asciidoc +++ b/docs/reference/analysis/analyzers/lang-analyzer.asciidoc @@ -76,7 +76,7 @@ the `keyword_marker` token filter from the custom analyzer configuration. 
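A minimal sketch of the stemming-exclusion behaviour referred to above: many of the language analyzers accept a `stem_exclusion` list of words to protect from stemming, which is what the `keyword_marker` filter implements in the rebuilt analyzers that follow (the index name and word list here are illustrative assumptions):

[source,console]
----------------------------------------------------
PUT /stem_exclusion_example
{
  "settings": {
    "analysis": {
      "analyzer": {
        "my_english": {
          "type": "english",
          "stem_exclusion": [ "organization", "organizations" ]
        }
      }
    }
  }
}
----------------------------------------------------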
The `arabic` analyzer could be reimplemented as a `custom` analyzer as follows: -[source,js] +[source,console] ---------------------------------------------------- PUT /arabic_example { @@ -113,9 +113,9 @@ PUT /arabic_example } } ---------------------------------------------------- -// CONSOLE // TEST[s/"arabic_keywords",//] // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: arabic_example, first: arabic, second: rebuilt_arabic}\nendyaml\n/] + <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -126,7 +126,7 @@ PUT /arabic_example The `armenian` analyzer could be reimplemented as a `custom` analyzer as follows: -[source,js] +[source,console] ---------------------------------------------------- PUT /armenian_example { @@ -161,9 +161,9 @@ PUT /armenian_example } } ---------------------------------------------------- -// CONSOLE // TEST[s/"armenian_keywords",//] // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: armenian_example, first: armenian, second: rebuilt_armenian}\nendyaml\n/] + <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -174,7 +174,7 @@ PUT /armenian_example The `basque` analyzer could be reimplemented as a `custom` analyzer as follows: -[source,js] +[source,console] ---------------------------------------------------- PUT /basque_example { @@ -209,9 +209,9 @@ PUT /basque_example } } ---------------------------------------------------- -// CONSOLE // TEST[s/"basque_keywords",//] // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: basque_example, first: basque, second: rebuilt_basque}\nendyaml\n/] + <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -222,7 +222,7 @@ PUT /basque_example The `bengali` analyzer could be reimplemented as a `custom` analyzer as follows: -[source,js] +[source,console] ---------------------------------------------------- PUT /bengali_example { @@ -260,9 +260,9 @@ PUT /bengali_example } } ---------------------------------------------------- -// CONSOLE // TEST[s/"bengali_keywords",//] // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: bengali_example, first: bengali, second: rebuilt_bengali}\nendyaml\n/] + <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -273,7 +273,7 @@ PUT /bengali_example The `brazilian` analyzer could be reimplemented as a `custom` analyzer as follows: -[source,js] +[source,console] ---------------------------------------------------- PUT /brazilian_example { @@ -308,9 +308,9 @@ PUT /brazilian_example } } ---------------------------------------------------- -// CONSOLE // TEST[s/"brazilian_keywords",//] // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: brazilian_example, first: brazilian, second: rebuilt_brazilian}\nendyaml\n/] + <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. 
<2> This filter should be removed unless there are words which should @@ -321,7 +321,7 @@ PUT /brazilian_example The `bulgarian` analyzer could be reimplemented as a `custom` analyzer as follows: -[source,js] +[source,console] ---------------------------------------------------- PUT /bulgarian_example { @@ -356,9 +356,9 @@ PUT /bulgarian_example } } ---------------------------------------------------- -// CONSOLE // TEST[s/"bulgarian_keywords",//] // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: bulgarian_example, first: bulgarian, second: rebuilt_bulgarian}\nendyaml\n/] + <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -369,7 +369,7 @@ PUT /bulgarian_example The `catalan` analyzer could be reimplemented as a `custom` analyzer as follows: -[source,js] +[source,console] ---------------------------------------------------- PUT /catalan_example { @@ -410,9 +410,9 @@ PUT /catalan_example } } ---------------------------------------------------- -// CONSOLE // TEST[s/"catalan_keywords",//] // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: catalan_example, first: catalan, second: rebuilt_catalan}\nendyaml\n/] + <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -426,7 +426,7 @@ for CJK text than the `cjk` analyzer. Experiment with your text and queries. The `cjk` analyzer could be reimplemented as a `custom` analyzer as follows: -[source,js] +[source,console] ---------------------------------------------------- PUT /cjk_example { @@ -459,9 +459,9 @@ PUT /cjk_example } } ---------------------------------------------------- -// CONSOLE // TEST[s/"cjk_keywords",//] // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: cjk_example, first: cjk, second: rebuilt_cjk}\nendyaml\n/] + <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. The default stop words are *almost* the same as the `_english_` set, but not exactly @@ -472,7 +472,7 @@ PUT /cjk_example The `czech` analyzer could be reimplemented as a `custom` analyzer as follows: -[source,js] +[source,console] ---------------------------------------------------- PUT /czech_example { @@ -507,9 +507,9 @@ PUT /czech_example } } ---------------------------------------------------- -// CONSOLE // TEST[s/"czech_keywords",//] // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: czech_example, first: czech, second: rebuilt_czech}\nendyaml\n/] + <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -520,7 +520,7 @@ PUT /czech_example The `danish` analyzer could be reimplemented as a `custom` analyzer as follows: -[source,js] +[source,console] ---------------------------------------------------- PUT /danish_example { @@ -555,9 +555,9 @@ PUT /danish_example } } ---------------------------------------------------- -// CONSOLE // TEST[s/"danish_keywords",//] // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: danish_example, first: danish, second: rebuilt_danish}\nendyaml\n/] + <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. 
<2> This filter should be removed unless there are words which should @@ -568,7 +568,7 @@ PUT /danish_example The `dutch` analyzer could be reimplemented as a `custom` analyzer as follows: -[source,js] +[source,console] ---------------------------------------------------- PUT /dutch_example { @@ -613,9 +613,9 @@ PUT /dutch_example } } ---------------------------------------------------- -// CONSOLE // TEST[s/"dutch_keywords",//] // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: dutch_example, first: dutch, second: rebuilt_dutch}\nendyaml\n/] + <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -626,7 +626,7 @@ PUT /dutch_example The `english` analyzer could be reimplemented as a `custom` analyzer as follows: -[source,js] +[source,console] ---------------------------------------------------- PUT /english_example { @@ -666,9 +666,9 @@ PUT /english_example } } ---------------------------------------------------- -// CONSOLE // TEST[s/"english_keywords",//] // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: english_example, first: english, second: rebuilt_english}\nendyaml\n/] + <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -679,7 +679,7 @@ PUT /english_example The `finnish` analyzer could be reimplemented as a `custom` analyzer as follows: -[source,js] +[source,console] ---------------------------------------------------- PUT /finnish_example { @@ -714,9 +714,9 @@ PUT /finnish_example } } ---------------------------------------------------- -// CONSOLE // TEST[s/"finnish_keywords",//] // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: finnish_example, first: finnish, second: rebuilt_finnish}\nendyaml\n/] + <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -727,7 +727,7 @@ PUT /finnish_example The `french` analyzer could be reimplemented as a `custom` analyzer as follows: -[source,js] +[source,console] ---------------------------------------------------- PUT /french_example { @@ -772,9 +772,9 @@ PUT /french_example } } ---------------------------------------------------- -// CONSOLE // TEST[s/"french_keywords",//] // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: french_example, first: french, second: rebuilt_french}\nendyaml\n/] + <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -785,7 +785,7 @@ PUT /french_example The `galician` analyzer could be reimplemented as a `custom` analyzer as follows: -[source,js] +[source,console] ---------------------------------------------------- PUT /galician_example { @@ -820,9 +820,9 @@ PUT /galician_example } } ---------------------------------------------------- -// CONSOLE // TEST[s/"galician_keywords",//] // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: galician_example, first: galician, second: rebuilt_galician}\nendyaml\n/] + <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. 
<2> This filter should be removed unless there are words which should @@ -833,7 +833,7 @@ PUT /galician_example The `german` analyzer could be reimplemented as a `custom` analyzer as follows: -[source,js] +[source,console] ---------------------------------------------------- PUT /german_example { @@ -869,9 +869,9 @@ PUT /german_example } } ---------------------------------------------------- -// CONSOLE // TEST[s/"german_keywords",//] // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: german_example, first: german, second: rebuilt_german}\nendyaml\n/] + <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -882,7 +882,7 @@ PUT /german_example The `greek` analyzer could be reimplemented as a `custom` analyzer as follows: -[source,js] +[source,console] ---------------------------------------------------- PUT /greek_example { @@ -921,9 +921,9 @@ PUT /greek_example } } ---------------------------------------------------- -// CONSOLE // TEST[s/"greek_keywords",//] // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: greek_example, first: greek, second: rebuilt_greek}\nendyaml\n/] + <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -934,7 +934,7 @@ PUT /greek_example The `hindi` analyzer could be reimplemented as a `custom` analyzer as follows: -[source,js] +[source,console] ---------------------------------------------------- PUT /hindi_example { @@ -972,9 +972,9 @@ PUT /hindi_example } } ---------------------------------------------------- -// CONSOLE // TEST[s/"hindi_keywords",//] // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: hindi_example, first: hindi, second: rebuilt_hindi}\nendyaml\n/] + <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -985,7 +985,7 @@ PUT /hindi_example The `hungarian` analyzer could be reimplemented as a `custom` analyzer as follows: -[source,js] +[source,console] ---------------------------------------------------- PUT /hungarian_example { @@ -1020,9 +1020,9 @@ PUT /hungarian_example } } ---------------------------------------------------- -// CONSOLE // TEST[s/"hungarian_keywords",//] // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: hungarian_example, first: hungarian, second: rebuilt_hungarian}\nendyaml\n/] + <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -1034,7 +1034,7 @@ PUT /hungarian_example The `indonesian` analyzer could be reimplemented as a `custom` analyzer as follows: -[source,js] +[source,console] ---------------------------------------------------- PUT /indonesian_example { @@ -1069,9 +1069,9 @@ PUT /indonesian_example } } ---------------------------------------------------- -// CONSOLE // TEST[s/"indonesian_keywords",//] // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: indonesian_example, first: indonesian, second: rebuilt_indonesian}\nendyaml\n/] + <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. 
<2> This filter should be removed unless there are words which should @@ -1082,7 +1082,7 @@ PUT /indonesian_example The `irish` analyzer could be reimplemented as a `custom` analyzer as follows: -[source,js] +[source,console] ---------------------------------------------------- PUT /irish_example { @@ -1133,9 +1133,9 @@ PUT /irish_example } } ---------------------------------------------------- -// CONSOLE // TEST[s/"irish_keywords",//] // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: irish_example, first: irish, second: rebuilt_irish}\nendyaml\n/] + <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -1146,7 +1146,7 @@ PUT /irish_example The `italian` analyzer could be reimplemented as a `custom` analyzer as follows: -[source,js] +[source,console] ---------------------------------------------------- PUT /italian_example { @@ -1192,9 +1192,9 @@ PUT /italian_example } } ---------------------------------------------------- -// CONSOLE // TEST[s/"italian_keywords",//] // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: italian_example, first: italian, second: rebuilt_italian}\nendyaml\n/] + <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -1205,7 +1205,7 @@ PUT /italian_example The `latvian` analyzer could be reimplemented as a `custom` analyzer as follows: -[source,js] +[source,console] ---------------------------------------------------- PUT /latvian_example { @@ -1240,9 +1240,9 @@ PUT /latvian_example } } ---------------------------------------------------- -// CONSOLE // TEST[s/"latvian_keywords",//] // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: latvian_example, first: latvian, second: rebuilt_latvian}\nendyaml\n/] + <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -1253,7 +1253,7 @@ PUT /latvian_example The `lithuanian` analyzer could be reimplemented as a `custom` analyzer as follows: -[source,js] +[source,console] ---------------------------------------------------- PUT /lithuanian_example { @@ -1288,9 +1288,9 @@ PUT /lithuanian_example } } ---------------------------------------------------- -// CONSOLE // TEST[s/"lithuanian_keywords",//] // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: lithuanian_example, first: lithuanian, second: rebuilt_lithuanian}\nendyaml\n/] + <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -1301,7 +1301,7 @@ PUT /lithuanian_example The `norwegian` analyzer could be reimplemented as a `custom` analyzer as follows: -[source,js] +[source,console] ---------------------------------------------------- PUT /norwegian_example { @@ -1336,9 +1336,9 @@ PUT /norwegian_example } } ---------------------------------------------------- -// CONSOLE // TEST[s/"norwegian_keywords",//] // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: norwegian_example, first: norwegian, second: rebuilt_norwegian}\nendyaml\n/] + <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. 
<2> This filter should be removed unless there are words which should @@ -1349,7 +1349,7 @@ PUT /norwegian_example The `persian` analyzer could be reimplemented as a `custom` analyzer as follows: -[source,js] +[source,console] ---------------------------------------------------- PUT /persian_example { @@ -1384,8 +1384,8 @@ PUT /persian_example } } ---------------------------------------------------- -// CONSOLE // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: persian_example, first: persian, second: rebuilt_persian}\nendyaml\n/] + <1> Replaces zero-width non-joiners with an ASCII space. <2> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. @@ -1395,7 +1395,7 @@ PUT /persian_example The `portuguese` analyzer could be reimplemented as a `custom` analyzer as follows: -[source,js] +[source,console] ---------------------------------------------------- PUT /portuguese_example { @@ -1430,9 +1430,9 @@ PUT /portuguese_example } } ---------------------------------------------------- -// CONSOLE // TEST[s/"portuguese_keywords",//] // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: portuguese_example, first: portuguese, second: rebuilt_portuguese}\nendyaml\n/] + <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -1443,7 +1443,7 @@ PUT /portuguese_example The `romanian` analyzer could be reimplemented as a `custom` analyzer as follows: -[source,js] +[source,console] ---------------------------------------------------- PUT /romanian_example { @@ -1478,9 +1478,9 @@ PUT /romanian_example } } ---------------------------------------------------- -// CONSOLE // TEST[s/"romanian_keywords",//] // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: romanian_example, first: romanian, second: rebuilt_romanian}\nendyaml\n/] + <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -1492,7 +1492,7 @@ PUT /romanian_example The `russian` analyzer could be reimplemented as a `custom` analyzer as follows: -[source,js] +[source,console] ---------------------------------------------------- PUT /russian_example { @@ -1527,9 +1527,9 @@ PUT /russian_example } } ---------------------------------------------------- -// CONSOLE // TEST[s/"russian_keywords",//] // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: russian_example, first: russian, second: rebuilt_russian}\nendyaml\n/] + <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -1540,7 +1540,7 @@ PUT /russian_example The `sorani` analyzer could be reimplemented as a `custom` analyzer as follows: -[source,js] +[source,console] ---------------------------------------------------- PUT /sorani_example { @@ -1577,9 +1577,9 @@ PUT /sorani_example } } ---------------------------------------------------- -// CONSOLE // TEST[s/"sorani_keywords",//] // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: sorani_example, first: sorani, second: rebuilt_sorani}\nendyaml\n/] + <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. 
<2> This filter should be removed unless there are words which should @@ -1590,7 +1590,7 @@ PUT /sorani_example The `spanish` analyzer could be reimplemented as a `custom` analyzer as follows: -[source,js] +[source,console] ---------------------------------------------------- PUT /spanish_example { @@ -1625,9 +1625,9 @@ PUT /spanish_example } } ---------------------------------------------------- -// CONSOLE // TEST[s/"spanish_keywords",//] // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: spanish_example, first: spanish, second: rebuilt_spanish}\nendyaml\n/] + <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -1638,7 +1638,7 @@ PUT /spanish_example The `swedish` analyzer could be reimplemented as a `custom` analyzer as follows: -[source,js] +[source,console] ---------------------------------------------------- PUT /swedish_example { @@ -1673,9 +1673,9 @@ PUT /swedish_example } } ---------------------------------------------------- -// CONSOLE // TEST[s/"swedish_keywords",//] // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: swedish_example, first: swedish, second: rebuilt_swedish}\nendyaml\n/] + <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -1686,7 +1686,7 @@ PUT /swedish_example The `turkish` analyzer could be reimplemented as a `custom` analyzer as follows: -[source,js] +[source,console] ---------------------------------------------------- PUT /turkish_example { @@ -1726,9 +1726,9 @@ PUT /turkish_example } } ---------------------------------------------------- -// CONSOLE // TEST[s/"turkish_keywords",//] // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: turkish_example, first: turkish, second: rebuilt_turkish}\nendyaml\n/] + <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> This filter should be removed unless there are words which should @@ -1739,7 +1739,7 @@ PUT /turkish_example The `thai` analyzer could be reimplemented as a `custom` analyzer as follows: -[source,js] +[source,console] ---------------------------------------------------- PUT /thai_example { @@ -1765,8 +1765,8 @@ PUT /thai_example } } ---------------------------------------------------- -// CONSOLE // TEST[s/"thai_keywords",//] // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: thai_example, first: thai, second: rebuilt_thai}\nendyaml\n/] + <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. diff --git a/docs/reference/analysis/analyzers/pattern-analyzer.asciidoc b/docs/reference/analysis/analyzers/pattern-analyzer.asciidoc index 899be69d260..3c57c8c79c0 100644 --- a/docs/reference/analysis/analyzers/pattern-analyzer.asciidoc +++ b/docs/reference/analysis/analyzers/pattern-analyzer.asciidoc @@ -22,7 +22,7 @@ Read more about http://www.regular-expressions.info/catastrophic.html[pathologic [float] === Example output -[source,js] +[source,console] --------------------------- POST _analyze { @@ -30,7 +30,6 @@ POST _analyze "text": "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone." } --------------------------- -// CONSOLE ///////////////////// @@ -174,7 +173,7 @@ about stop word configuration. 
In this example, we configure the `pattern` analyzer to split email addresses on non-word characters or on underscores (`\W|_`), and to lower-case the result: -[source,js] +[source,console] ---------------------------- PUT my_index { @@ -197,7 +196,6 @@ POST my_index/_analyze "text": "John_Smith@foo-bar.com" } ---------------------------- -// CONSOLE <1> The backslashes in the pattern need to be escaped when specifying the pattern as a JSON string. @@ -262,7 +260,7 @@ The above example produces the following terms: The following more complicated example splits CamelCase text into tokens: -[source,js] +[source,console] -------------------------------------------------- PUT my_index { @@ -284,7 +282,6 @@ GET my_index/_analyze "text": "MooseX::FTPClass2_beta" } -------------------------------------------------- -// CONSOLE ///////////////////// @@ -381,7 +378,7 @@ it, usually by adding token filters. This would recreate the built-in `pattern` analyzer and you can use it as a starting point for further customization: -[source,js] +[source,console] ---------------------------------------------------- PUT /pattern_example { @@ -405,7 +402,6 @@ PUT /pattern_example } } ---------------------------------------------------- -// CONSOLE // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: pattern_example, first: pattern, second: rebuilt_pattern}\nendyaml\n/] <1> The default pattern is `\W+` which splits on non-word characters and this is where you'd change it. diff --git a/docs/reference/analysis/analyzers/simple-analyzer.asciidoc b/docs/reference/analysis/analyzers/simple-analyzer.asciidoc index 57662b72329..75614a151ac 100644 --- a/docs/reference/analysis/analyzers/simple-analyzer.asciidoc +++ b/docs/reference/analysis/analyzers/simple-analyzer.asciidoc @@ -7,7 +7,7 @@ character which is not a letter. All terms are lower cased. [float] === Example output -[source,js] +[source,console] --------------------------- POST _analyze { @@ -15,7 +15,6 @@ POST _analyze "text": "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone." } --------------------------- -// CONSOLE ///////////////////// @@ -132,7 +131,7 @@ it as a `custom` analyzer and modify it, usually by adding token filters. This would recreate the built-in `simple` analyzer and you can use it as a starting point for further customization: -[source,js] +[source,console] ---------------------------------------------------- PUT /simple_example { @@ -149,6 +148,5 @@ PUT /simple_example } } ---------------------------------------------------- -// CONSOLE // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: simple_example, first: simple, second: rebuilt_simple}\nendyaml\n/] <1> You'd add any token filters here. diff --git a/docs/reference/analysis/analyzers/standard-analyzer.asciidoc b/docs/reference/analysis/analyzers/standard-analyzer.asciidoc index 4096560dfeb..52a206af533 100644 --- a/docs/reference/analysis/analyzers/standard-analyzer.asciidoc +++ b/docs/reference/analysis/analyzers/standard-analyzer.asciidoc @@ -10,7 +10,7 @@ for most languages. [float] === Example output -[source,js] +[source,console] --------------------------- POST _analyze { @@ -18,7 +18,6 @@ POST _analyze "text": "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone." 
} --------------------------- -// CONSOLE ///////////////////// @@ -148,7 +147,7 @@ In this example, we configure the `standard` analyzer to have a `max_token_length` of 5 (for demonstration purposes), and to use the pre-defined list of English stop words: -[source,js] +[source,console] ---------------------------- PUT my_index { @@ -171,7 +170,6 @@ POST my_index/_analyze "text": "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone." } ---------------------------- -// CONSOLE ///////////////////// @@ -279,7 +277,7 @@ parameters then you need to recreate it as a `custom` analyzer and modify it, usually by adding token filters. This would recreate the built-in `standard` analyzer and you can use it as a starting point: -[source,js] +[source,console] ---------------------------------------------------- PUT /standard_example { @@ -297,6 +295,5 @@ PUT /standard_example } } ---------------------------------------------------- -// CONSOLE // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: standard_example, first: standard, second: rebuilt_standard}\nendyaml\n/] <1> You'd add any token filters after `lowercase`. diff --git a/docs/reference/analysis/analyzers/stop-analyzer.asciidoc b/docs/reference/analysis/analyzers/stop-analyzer.asciidoc index 3176b6e87f9..517233d93b7 100644 --- a/docs/reference/analysis/analyzers/stop-analyzer.asciidoc +++ b/docs/reference/analysis/analyzers/stop-analyzer.asciidoc @@ -8,7 +8,7 @@ but adds support for removing stop words. It defaults to using the [float] === Example output -[source,js] +[source,console] --------------------------- POST _analyze { @@ -16,7 +16,6 @@ POST _analyze "text": "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone." } --------------------------- -// CONSOLE ///////////////////// @@ -127,7 +126,7 @@ about stop word configuration. In this example, we configure the `stop` analyzer to use a specified list of words as stop words: -[source,js] +[source,console] ---------------------------- PUT my_index { @@ -149,7 +148,6 @@ POST my_index/_analyze "text": "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone." } ---------------------------- -// CONSOLE ///////////////////// @@ -244,7 +242,7 @@ it, usually by adding token filters. This would recreate the built-in `stop` analyzer and you can use it as a starting point for further customization: -[source,js] +[source,console] ---------------------------------------------------- PUT /stop_example { @@ -268,8 +266,8 @@ PUT /stop_example } } ---------------------------------------------------- -// CONSOLE // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: stop_example, first: stop, second: rebuilt_stop}\nendyaml\n/] + <1> The default stopwords can be overridden with the `stopwords` or `stopwords_path` parameters. <2> You'd add any token filters after `english_stop`. diff --git a/docs/reference/analysis/analyzers/whitespace-analyzer.asciidoc b/docs/reference/analysis/analyzers/whitespace-analyzer.asciidoc index 531751c63e2..989abd4f13e 100644 --- a/docs/reference/analysis/analyzers/whitespace-analyzer.asciidoc +++ b/docs/reference/analysis/analyzers/whitespace-analyzer.asciidoc @@ -7,7 +7,7 @@ whitespace character. [float] === Example output -[source,js] +[source,console] --------------------------- POST _analyze { @@ -15,7 +15,6 @@ POST _analyze "text": "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone." } --------------------------- -// CONSOLE ///////////////////// @@ -125,7 +124,7 @@ recreate it as a `custom` analyzer and modify it, usually by adding token filters. 
This would recreate the built-in `whitespace` analyzer and you can use it as a starting point for further customization: -[source,js] +[source,console] ---------------------------------------------------- PUT /whitespace_example { @@ -142,6 +141,6 @@ PUT /whitespace_example } } ---------------------------------------------------- -// CONSOLE // TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: whitespace_example, first: whitespace, second: rebuilt_whitespace}\nendyaml\n/] + <1> You'd add any token filters here. diff --git a/docs/reference/analysis/charfilters/htmlstrip-charfilter.asciidoc b/docs/reference/analysis/charfilters/htmlstrip-charfilter.asciidoc index 33741ee6a6c..83dcfd2d380 100644 --- a/docs/reference/analysis/charfilters/htmlstrip-charfilter.asciidoc +++ b/docs/reference/analysis/charfilters/htmlstrip-charfilter.asciidoc @@ -8,7 +8,7 @@ replaces HTML entities with their decoded value (e.g. replacing `&` with [float] === Example output -[source,js] +[source,console] --------------------------- POST _analyze { @@ -17,7 +17,7 @@ POST _analyze "text": "
<p>I&apos;m so <b>happy</b>!</p>
" } --------------------------- -// CONSOLE + <1> The <> returns a single term. ///////////////////// @@ -70,7 +70,7 @@ The `html_strip` character filter accepts the following parameter: In this example, we configure the `html_strip` character filter to leave `` tags in place: -[source,js] +[source,console] ---------------------------- PUT my_index { @@ -98,7 +98,6 @@ POST my_index/_analyze "text": "
<p>I&apos;m so <b>happy</b>!</p>
" } ---------------------------- -// CONSOLE ///////////////////// diff --git a/docs/reference/analysis/charfilters/mapping-charfilter.asciidoc b/docs/reference/analysis/charfilters/mapping-charfilter.asciidoc index ee95d1798d1..5bce2beed8a 100644 --- a/docs/reference/analysis/charfilters/mapping-charfilter.asciidoc +++ b/docs/reference/analysis/charfilters/mapping-charfilter.asciidoc @@ -31,7 +31,7 @@ Either the `mappings` or `mappings_path` parameter must be provided. In this example, we configure the `mapping` character filter to replace Arabic numerals with their Latin equivalents: -[source,js] +[source,console] ---------------------------- PUT my_index { @@ -72,7 +72,6 @@ POST my_index/_analyze "text": "My license plate is ٢٥٠١٥" } ---------------------------- -// CONSOLE ///////////////////// @@ -104,7 +103,7 @@ The above example produces the following term: Keys and values can be strings with multiple characters. The following example replaces the `:)` and `:(` emoticons with a text equivalent: -[source,js] +[source,console] ---------------------------- PUT my_index { @@ -137,7 +136,6 @@ POST my_index/_analyze "text": "I'm delighted about it :(" } ---------------------------- -// CONSOLE ///////////////////// diff --git a/docs/reference/analysis/charfilters/pattern-replace-charfilter.asciidoc b/docs/reference/analysis/charfilters/pattern-replace-charfilter.asciidoc index a6fc0dcb39e..d6f4670fb40 100644 --- a/docs/reference/analysis/charfilters/pattern-replace-charfilter.asciidoc +++ b/docs/reference/analysis/charfilters/pattern-replace-charfilter.asciidoc @@ -47,7 +47,7 @@ In this example, we configure the `pattern_replace` character filter to replace any embedded dashes in numbers with underscores, i.e `123-456-789` -> `123_456_789`: -[source,js] +[source,console] ---------------------------- PUT my_index { @@ -78,12 +78,11 @@ POST my_index/_analyze "text": "My credit card is 123-456-789" } ---------------------------- -// CONSOLE // TEST[s/\$1//] // the test framework doesn't like the $1 so we just throw it away rather than // try to get it to work properly. At least we are still testing the charfilter. -The above example produces the following term: +The above example produces the following terms: [source,text] --------------------------- @@ -98,7 +97,7 @@ This example inserts a space whenever it encounters a lower-case letter followed by an upper-case letter (i.e. `fooBarBaz` -> `foo Bar Baz`), allowing camelCase words to be queried individually: -[source,js] +[source,console] ---------------------------- PUT my_index { @@ -140,7 +139,6 @@ POST my_index/_analyze "text": "The fooBarBaz method" } ---------------------------- -// CONSOLE ///////////////////// @@ -200,7 +198,7 @@ Querying for `bar` will find the document correctly, but highlighting on the result will produce incorrect highlights, because our character filter changed the length of the original text: -[source,js] +[source,console] ---------------------------- PUT my_index/_doc/1?refresh { @@ -221,7 +219,6 @@ GET my_index/_search } } ---------------------------- -// CONSOLE // TEST[continued] The output from the above is: diff --git a/docs/reference/analysis/normalizers.asciidoc b/docs/reference/analysis/normalizers.asciidoc index ddf37acd67b..94c99eec899 100644 --- a/docs/reference/analysis/normalizers.asciidoc +++ b/docs/reference/analysis/normalizers.asciidoc @@ -21,7 +21,7 @@ to get one is by building a custom one. Custom normalizers take a list of char <> and a list of <>. 
-[source,js] +[source,console] -------------------------------- PUT index { @@ -55,4 +55,3 @@ PUT index } } -------------------------------- -// CONSOLE diff --git a/docs/reference/analysis/testing.asciidoc b/docs/reference/analysis/testing.asciidoc index aa8fa4f9ec0..092a7aa70ee 100644 --- a/docs/reference/analysis/testing.asciidoc +++ b/docs/reference/analysis/testing.asciidoc @@ -5,7 +5,7 @@ terms produced by an analyzer. A built-in analyzer (or combination of built-in tokenizer, token filters, and character filters) can be specified inline in the request: -[source,js] +[source,console] ------------------------------------- POST _analyze { @@ -20,7 +20,6 @@ POST _analyze "text": "Is this déja vu?" } ------------------------------------- -// CONSOLE @@ -39,7 +38,7 @@ highlighting search snippets). Alternatively, a <> can be referred to when running the `analyze` API on a specific index: -[source,js] +[source,console] ------------------------------------- PUT my_index { @@ -79,7 +78,6 @@ GET my_index/_analyze <3> "text": "Is this déjà vu?" } ------------------------------------- -// CONSOLE <1> Define a `custom` analyzer called `std_folded`. <2> The field `my_text` uses the `std_folded` analyzer. diff --git a/docs/reference/analysis/tokenfilters/asciifolding-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/asciifolding-tokenfilter.asciidoc index bd22b013334..49f1b9869b0 100644 --- a/docs/reference/analysis/tokenfilters/asciifolding-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/asciifolding-tokenfilter.asciidoc @@ -6,7 +6,7 @@ and symbolic Unicode characters which are not in the first 127 ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if one exists. Example: -[source,js] +[source,console] -------------------------------------------------- PUT /asciifold_example { @@ -22,13 +22,12 @@ PUT /asciifold_example } } -------------------------------------------------- -// CONSOLE Accepts `preserve_original` setting which defaults to false but if true will keep the original token as well as emit the folded token. For example: -[source,js] +[source,console] -------------------------------------------------- PUT /asciifold_example { @@ -50,4 +49,3 @@ PUT /asciifold_example } } -------------------------------------------------- -// CONSOLE diff --git a/docs/reference/analysis/tokenfilters/cjk-bigram-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/cjk-bigram-tokenfilter.asciidoc index cc26d025f04..8ad2403f38e 100644 --- a/docs/reference/analysis/tokenfilters/cjk-bigram-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/cjk-bigram-tokenfilter.asciidoc @@ -14,7 +14,7 @@ Bigrams are generated for characters in `han`, `hiragana`, `katakana` and `hangul`, but bigrams can be disabled for particular scripts with the `ignored_scripts` parameter. All non-CJK input is passed through unmodified. 
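Several of the hunks above rework the `_analyze` snippets in `testing.asciidoc`; as a rough illustration of driving that API from a script, here is a minimal sketch assuming a cluster on `localhost:9200` and the Python `requests` package (the built-in `standard` analyzer and the sample text are only placeholders).

[source,python]
--------------------------------------------------
import requests

# Minimal sketch: call the _analyze API on a local cluster (assumed reachable at
# localhost:9200) with a built-in analyzer, similar to the testing.asciidoc snippets.
resp = requests.post(
    "http://localhost:9200/_analyze",
    json={"analyzer": "standard", "text": "Is this déja vu?"},
)
resp.raise_for_status()
print([t["token"] for t in resp.json()["tokens"]])
# e.g. ['is', 'this', 'déja', 'vu']
--------------------------------------------------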
-[source,js] +[source,console] -------------------------------------------------- PUT /cjk_bigram_example { @@ -41,4 +41,3 @@ PUT /cjk_bigram_example } } -------------------------------------------------- -// CONSOLE diff --git a/docs/reference/analysis/tokenfilters/common-grams-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/common-grams-tokenfilter.asciidoc index 3f5ba6a7015..e68c8df6387 100644 --- a/docs/reference/analysis/tokenfilters/common-grams-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/common-grams-tokenfilter.asciidoc @@ -39,7 +39,7 @@ Note, `common_words` or `common_words_path` field is required. Here is an example: -[source,js] +[source,console] -------------------------------------------------- PUT /common_grams_example { @@ -70,11 +70,10 @@ PUT /common_grams_example } } -------------------------------------------------- -// CONSOLE You can see the output by using e.g. the `_analyze` endpoint: -[source,js] +[source,console] -------------------------------------------------- POST /common_grams_example/_analyze { @@ -82,7 +81,6 @@ POST /common_grams_example/_analyze "text" : "the quick brown is a fox" } -------------------------------------------------- -// CONSOLE // TEST[continued] And the response will be: diff --git a/docs/reference/analysis/tokenfilters/compound-word-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/compound-word-tokenfilter.asciidoc index d200c0b988b..f80bcff723e 100644 --- a/docs/reference/analysis/tokenfilters/compound-word-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/compound-word-tokenfilter.asciidoc @@ -82,7 +82,7 @@ Whether to include only the longest matching subword or not. Defaults to `false Here is an example: -[source,js] +[source,console] -------------------------------------------------- PUT /compound_word_example { @@ -113,4 +113,3 @@ PUT /compound_word_example } } -------------------------------------------------- -// CONSOLE diff --git a/docs/reference/analysis/tokenfilters/condition-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/condition-tokenfilter.asciidoc index 38ee975601a..17dc46faad8 100644 --- a/docs/reference/analysis/tokenfilters/condition-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/condition-tokenfilter.asciidoc @@ -18,7 +18,7 @@ script:: a predicate script that determines whether or not the filters will be a You can set it up like: -[source,js] +[source,console] -------------------------------------------------- PUT /condition_example { @@ -43,14 +43,13 @@ PUT /condition_example } } -------------------------------------------------- -// CONSOLE <1> This will only apply the lowercase filter to terms that are less than 5 characters in length And test it like: -[source,js] +[source,console] -------------------------------------------------- POST /condition_example/_analyze { @@ -58,7 +57,6 @@ POST /condition_example/_analyze "text" : "What Flapdoodle" } -------------------------------------------------- -// CONSOLE // TEST[continued] And it'd respond: diff --git a/docs/reference/analysis/tokenfilters/elision-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/elision-tokenfilter.asciidoc index 34646a0413e..91577342f0a 100644 --- a/docs/reference/analysis/tokenfilters/elision-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/elision-tokenfilter.asciidoc @@ -11,7 +11,7 @@ case sensitive. 
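The `condition` token filter example above applies the lowercase filter only to terms shorter than five characters. A tiny Python sketch of that predicate (an illustration of the idea, not of the Painless script itself) makes the expected `_analyze` output easy to verify.

[source,python]
--------------------------------------------------
# Illustration of the condition filter's predicate from the example above:
# lowercase a token only when it is shorter than 5 characters.
def conditional_lowercase(tokens, max_len=5):
    return [t.lower() if len(t) < max_len else t for t in tokens]

print(conditional_lowercase("What Flapdoodle".split()))
# ['what', 'Flapdoodle']
--------------------------------------------------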
For example: -[source,js] +[source,console] -------------------------------------------------- PUT /elision_example { @@ -34,4 +34,3 @@ PUT /elision_example } } -------------------------------------------------- -// CONSOLE diff --git a/docs/reference/analysis/tokenfilters/hunspell-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/hunspell-tokenfilter.asciidoc index cef687f7619..2f258b00ee9 100644 --- a/docs/reference/analysis/tokenfilters/hunspell-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/hunspell-tokenfilter.asciidoc @@ -40,7 +40,7 @@ settings defined in the `elasticsearch.yml`). One can use the hunspell stem filter by configuring it the analysis settings: -[source,js] +[source,console] -------------------------------------------------- PUT /hunspell_example { @@ -63,7 +63,6 @@ PUT /hunspell_example } } -------------------------------------------------- -// CONSOLE The hunspell token filter accepts four options: diff --git a/docs/reference/analysis/tokenfilters/keep-types-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/keep-types-tokenfilter.asciidoc index 0b2f232112a..8423d299411 100644 --- a/docs/reference/analysis/tokenfilters/keep-types-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/keep-types-tokenfilter.asciidoc @@ -17,7 +17,7 @@ if set to `exclude` the specified token types will be removed from the stream You can set it up like: -[source,js] +[source,console] -------------------------------------------------- PUT /keep_types_example { @@ -39,11 +39,10 @@ PUT /keep_types_example } } -------------------------------------------------- -// CONSOLE And test it like: -[source,js] +[source,console] -------------------------------------------------- POST /keep_types_example/_analyze { @@ -51,7 +50,6 @@ POST /keep_types_example/_analyze "text" : "this is just 1 a test" } -------------------------------------------------- -// CONSOLE // TEST[continued] The response will be: @@ -77,7 +75,7 @@ Note how only the `<NUM>` token is in the output.
If the `mode` parameter is set to `exclude` like in the following example: -[source,js] +[source,console] -------------------------------------------------- PUT /keep_types_exclude_example { @@ -100,11 +98,10 @@ PUT /keep_types_exclude_example } } -------------------------------------------------- -// CONSOLE And we test it like: -[source,js] +[source,console] -------------------------------------------------- POST /keep_types_exclude_example/_analyze { @@ -112,7 +109,6 @@ POST /keep_types_exclude_example/_analyze "text" : "hello 101 world" } -------------------------------------------------- -// CONSOLE // TEST[continued] The response will be: diff --git a/docs/reference/analysis/tokenfilters/keep-words-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/keep-words-tokenfilter.asciidoc index b7385379be9..8166c4c2d5b 100644 --- a/docs/reference/analysis/tokenfilters/keep-words-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/keep-words-tokenfilter.asciidoc @@ -18,7 +18,7 @@ keep_words_case:: a boolean indicating whether to lower case the words (defaults [float] === Settings example -[source,js] +[source,console] -------------------------------------------------- PUT /keep_words_example { @@ -48,4 +48,3 @@ PUT /keep_words_example } } -------------------------------------------------- -// CONSOLE diff --git a/docs/reference/analysis/tokenfilters/keyword-marker-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/keyword-marker-tokenfilter.asciidoc index bf79167bdf2..ea9dcad8a6c 100644 --- a/docs/reference/analysis/tokenfilters/keyword-marker-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/keyword-marker-tokenfilter.asciidoc @@ -21,7 +21,7 @@ in the text. You can configure it like: -[source,js] +[source,console] -------------------------------------------------- PUT /keyword_marker_example { @@ -49,11 +49,10 @@ PUT /keyword_marker_example } } -------------------------------------------------- -// CONSOLE And test it with: -[source,js] +[source,console] -------------------------------------------------- POST /keyword_marker_example/_analyze { @@ -61,7 +60,6 @@ POST /keyword_marker_example/_analyze "text" : "I like cats" } -------------------------------------------------- -// CONSOLE // TEST[continued] And it'd respond: @@ -97,7 +95,7 @@ And it'd respond: As compared to the `normal` analyzer which has `cats` stemmed to `cat`: -[source,js] +[source,console] -------------------------------------------------- POST /keyword_marker_example/_analyze { @@ -105,7 +103,6 @@ POST /keyword_marker_example/_analyze "text" : "I like cats" } -------------------------------------------------- -// CONSOLE // TEST[continued] Response: diff --git a/docs/reference/analysis/tokenfilters/keyword-repeat-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/keyword-repeat-tokenfilter.asciidoc index f8f298c1608..ca15b2da5a8 100644 --- a/docs/reference/analysis/tokenfilters/keyword-repeat-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/keyword-repeat-tokenfilter.asciidoc @@ -12,7 +12,7 @@ unnecessary duplicates. 
Here is an example of using the `keyword_repeat` token filter to preserve both the stemmed and unstemmed version of tokens: -[source,js] +[source,console] -------------------------------------------------- PUT /keyword_repeat_example { @@ -35,11 +35,10 @@ PUT /keyword_repeat_example } } -------------------------------------------------- -// CONSOLE And you can test it with: -[source,js] +[source,console] -------------------------------------------------- POST /keyword_repeat_example/_analyze { @@ -47,7 +46,6 @@ POST /keyword_repeat_example/_analyze "text" : "I like cats" } -------------------------------------------------- -// CONSOLE // TEST[continued] And it'd respond: diff --git a/docs/reference/analysis/tokenfilters/limit-token-count-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/limit-token-count-tokenfilter.asciidoc index ba2018c1076..7fe2432ca54 100644 --- a/docs/reference/analysis/tokenfilters/limit-token-count-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/limit-token-count-tokenfilter.asciidoc @@ -16,7 +16,7 @@ is `false`. Here is an example: -[source,js] +[source,console] -------------------------------------------------- PUT /limit_example { @@ -39,4 +39,3 @@ PUT /limit_example } } -------------------------------------------------- -// CONSOLE diff --git a/docs/reference/analysis/tokenfilters/lowercase-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/lowercase-tokenfilter.asciidoc index 519fd77ba2a..f04cea237fa 100644 --- a/docs/reference/analysis/tokenfilters/lowercase-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/lowercase-tokenfilter.asciidoc @@ -8,7 +8,7 @@ Lowercase token filter supports Greek, Irish, and Turkish lowercase token filters through the `language` parameter. Below is a usage example in a custom analyzer -[source,js] +[source,console] -------------------------------------------------- PUT /lowercase_example { @@ -36,4 +36,3 @@ PUT /lowercase_example } } -------------------------------------------------- -// CONSOLE diff --git a/docs/reference/analysis/tokenfilters/minhash-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/minhash-tokenfilter.asciidoc index 75bcf53b6d9..86e14c09d51 100644 --- a/docs/reference/analysis/tokenfilters/minhash-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/minhash-tokenfilter.asciidoc @@ -123,8 +123,8 @@ POST /index1 }, "mappings": { "properties": { - "text": { - "fingerprint": "text", + "fingerprint": { + "type": "text", "analyzer": "my_analyzer" } } diff --git a/docs/reference/analysis/tokenfilters/multiplexer-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/multiplexer-tokenfilter.asciidoc index 9e82c84df33..c943c95defe 100644 --- a/docs/reference/analysis/tokenfilters/multiplexer-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/multiplexer-tokenfilter.asciidoc @@ -29,7 +29,7 @@ preserve_original:: if `true` (the default) then emit the original token in You can set it up like: -[source,js] +[source,console] -------------------------------------------------- PUT /multiplexer_example { @@ -51,11 +51,10 @@ PUT /multiplexer_example } } -------------------------------------------------- -// CONSOLE And test it like: -[source,js] +[source,console] -------------------------------------------------- POST /multiplexer_example/_analyze { @@ -63,7 +62,6 @@ POST /multiplexer_example/_analyze "text" : "Going HOME" } -------------------------------------------------- -// CONSOLE // TEST[continued] And it'd respond: diff --git 
a/docs/reference/analysis/tokenfilters/pattern-capture-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/pattern-capture-tokenfilter.asciidoc index 5b935d31f12..0b5aa62029f 100644 --- a/docs/reference/analysis/tokenfilters/pattern-capture-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/pattern-capture-tokenfilter.asciidoc @@ -44,7 +44,7 @@ emit the original token: `abc123def456`. This is particularly useful for indexing text like camel-case code, eg `stripHTML` where a user may search for `"strip html"` or `"striphtml"`: -[source,js] +[source,console] -------------------------------------------------- PUT test { @@ -70,7 +70,6 @@ PUT test } } -------------------------------------------------- -// CONSOLE When used to analyze the text @@ -85,7 +84,7 @@ this emits the tokens: [ `import`, `static`, `org`, `apache`, `commons`, Another example is analyzing email addresses: -[source,js] +[source,console] -------------------------------------------------- PUT test { @@ -113,7 +112,6 @@ PUT test } } -------------------------------------------------- -// CONSOLE When the above analyzer is used on an email address like: diff --git a/docs/reference/analysis/tokenfilters/predicate-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/predicate-tokenfilter.asciidoc index 91e4ea2bfff..e21e4e5690f 100644 --- a/docs/reference/analysis/tokenfilters/predicate-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/predicate-tokenfilter.asciidoc @@ -15,7 +15,7 @@ be emitted. Note that only inline scripts are supported. You can set it up like: -[source,js] +[source,console] -------------------------------------------------- PUT /condition_example { @@ -39,13 +39,12 @@ PUT /condition_example } } -------------------------------------------------- -// CONSOLE <1> This will emit tokens that are more than 5 characters long And test it like: -[source,js] +[source,console] -------------------------------------------------- POST /condition_example/_analyze { @@ -53,7 +52,6 @@ POST /condition_example/_analyze "text" : "What Flapdoodle" } -------------------------------------------------- -// CONSOLE // TEST[continued] And it'd respond: diff --git a/docs/reference/analysis/tokenfilters/snowball-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/snowball-tokenfilter.asciidoc index 99ed03649ff..bafb4fb7f77 100644 --- a/docs/reference/analysis/tokenfilters/snowball-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/snowball-tokenfilter.asciidoc @@ -10,7 +10,7 @@ values: `Armenian`, `Basque`, `Catalan`, `Danish`, `Dutch`, `English`, For example: -[source,js] +[source,console] -------------------------------------------------- PUT /my_index { @@ -32,4 +32,3 @@ PUT /my_index } } -------------------------------------------------- -// CONSOLE diff --git a/docs/reference/analysis/tokenfilters/stemmer-override-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/stemmer-override-tokenfilter.asciidoc index e178181d147..d2fbba84180 100644 --- a/docs/reference/analysis/tokenfilters/stemmer-override-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/stemmer-override-tokenfilter.asciidoc @@ -18,7 +18,7 @@ absolute) to a list of mappings. 
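Relating to the `pattern_capture` discussion above (indexing camel-case identifiers such as `stripHTML` so that both `"strip html"` and `"striphtml"` can match), the keep-the-original-plus-captured-parts idea can be sketched as follows; the regular expression here is my own illustration, not the pattern used in the docs.

[source,python]
--------------------------------------------------
import re

# Sketch of the pattern_capture idea for camel-case input: emit the original token
# plus lower-cased fragments. The regex is an illustrative assumption, not the
# filter's configured pattern.
def camel_case_parts(token):
    parts = re.findall(r"[A-Z]?[a-z]+|[A-Z]+(?![a-z])|\d+", token)
    return [token] + [p.lower() for p in parts]

print(camel_case_parts("stripHTML"))
# ['stripHTML', 'strip', 'html']
--------------------------------------------------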
Here is an example: -[source,js] +[source,console] -------------------------------------------------- PUT /my_index { @@ -40,7 +40,6 @@ PUT /my_index } } -------------------------------------------------- -// CONSOLE Where the file looks like: @@ -51,7 +50,7 @@ include::{es-test-dir}/cluster/config/analysis/stemmer_override.txt[] You can also define the overrides rules inline: -[source,js] +[source,console] -------------------------------------------------- PUT /my_index { @@ -76,4 +75,3 @@ PUT /my_index } } -------------------------------------------------- -// CONSOLE diff --git a/docs/reference/analysis/tokenfilters/stemmer-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/stemmer-tokenfilter.asciidoc index b5d5426ff27..29ae8e96606 100644 --- a/docs/reference/analysis/tokenfilters/stemmer-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/stemmer-tokenfilter.asciidoc @@ -10,7 +10,7 @@ A filter that provides access to (almost) all of the available stemming token filters through a single unified interface. For example: -[source,js] +[source,console] -------------------------------------------------- PUT /my_index { @@ -32,7 +32,6 @@ PUT /my_index } } -------------------------------------------------- -// CONSOLE The `language`/`name` parameter controls the stemmer with the following available values (the preferred filters are marked in *bold*): diff --git a/docs/reference/analysis/tokenfilters/stop-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/stop-tokenfilter.asciidoc index 259bf785b5b..d73d878696e 100644 --- a/docs/reference/analysis/tokenfilters/stop-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/stop-tokenfilter.asciidoc @@ -31,7 +31,7 @@ type: The `stopwords` parameter accepts either an array of stopwords: -[source,js] +[source,console] ------------------------------------ PUT /my_index { @@ -47,11 +47,10 @@ PUT /my_index } } ------------------------------------ -// CONSOLE or a predefined language-specific list: -[source,js] +[source,console] ------------------------------------ PUT /my_index { @@ -67,7 +66,6 @@ PUT /my_index } } ------------------------------------ -// CONSOLE Elasticsearch provides the following predefined list of languages: diff --git a/docs/reference/analysis/tokenfilters/synonym-graph-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/synonym-graph-tokenfilter.asciidoc index b434129626d..63e037de486 100644 --- a/docs/reference/analysis/tokenfilters/synonym-graph-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/synonym-graph-tokenfilter.asciidoc @@ -19,7 +19,7 @@ standard <>. Synonyms are configured using a configuration file. Here is an example: -[source,js] +[source,console] -------------------------------------------------- PUT /test_index { @@ -43,7 +43,6 @@ PUT /test_index } } -------------------------------------------------- -// CONSOLE The above configures a `search_synonyms` filter, with a path of `analysis/synonym.txt` (relative to the `config` location). The @@ -55,7 +54,7 @@ Additional settings are: * `lenient` (defaults to `false`). If `true` ignores exceptions while parsing the synonym configuration. It is important to note that only those synonym rules which cannot get parsed are ignored. 
For instance consider the following request: -[source,js] +[source,console] -------------------------------------------------- PUT /test_index { @@ -84,7 +83,7 @@ PUT /test_index } } -------------------------------------------------- -// CONSOLE + With the above request the word `bar` gets skipped but a mapping `foo => baz` is still added. However, if the mapping being added was "foo, baz => bar" nothing would get added to the synonym list. This is because the target word for the mapping is itself eliminated because it was a stop word. Similarly, if the mapping was "bar, foo, baz" and `expand` was @@ -115,7 +114,7 @@ include::{es-test-dir}/cluster/config/analysis/synonym.txt[] You can also define synonyms for the filter directly in the configuration file (note use of `synonyms` instead of `synonyms_path`): -[source,js] +[source,console] -------------------------------------------------- PUT /test_index { @@ -136,7 +135,6 @@ PUT /test_index } } -------------------------------------------------- -// CONSOLE However, it is recommended to define large synonyms set in a file using `synonyms_path`, because specifying them inline increases cluster size unnecessarily. @@ -147,7 +145,7 @@ However, it is recommended to define large synonyms set in a file using Synonyms based on http://wordnet.princeton.edu/[WordNet] format can be declared using `format`: -[source,js] +[source,console] -------------------------------------------------- PUT /test_index { @@ -170,7 +168,6 @@ PUT /test_index } } -------------------------------------------------- -// CONSOLE Using `synonyms_path` to define WordNet synonyms in a file is supported as well. diff --git a/docs/reference/analysis/tokenfilters/synonym-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/synonym-tokenfilter.asciidoc index f47e97d27ea..d6a82bc2654 100644 --- a/docs/reference/analysis/tokenfilters/synonym-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/synonym-tokenfilter.asciidoc @@ -5,7 +5,7 @@ The `synonym` token filter allows to easily handle synonyms during the analysis process. Synonyms are configured using a configuration file. Here is an example: -[source,js] +[source,console] -------------------------------------------------- PUT /test_index { @@ -29,7 +29,6 @@ PUT /test_index } } -------------------------------------------------- -// CONSOLE The above configures a `synonym` filter, with a path of `analysis/synonym.txt` (relative to the `config` location). The @@ -45,8 +44,7 @@ Additional settings are: to note that only those synonym rules which cannot get parsed are ignored. For instance consider the following request: - -[source,js] +[source,console] -------------------------------------------------- PUT /test_index { @@ -75,7 +73,7 @@ PUT /test_index } } -------------------------------------------------- -// CONSOLE + With the above request the word `bar` gets skipped but a mapping `foo => baz` is still added. However, if the mapping being added was "foo, baz => bar" nothing would get added to the synonym list. This is because the target word for the mapping is itself eliminated because it was a stop word. 
Similarly, if the mapping was "bar, foo, baz" and `expand` was @@ -107,7 +105,7 @@ include::{es-test-dir}/cluster/config/analysis/synonym.txt[] You can also define synonyms for the filter directly in the configuration file (note use of `synonyms` instead of `synonyms_path`): -[source,js] +[source,console] -------------------------------------------------- PUT /test_index { @@ -128,7 +126,6 @@ PUT /test_index } } -------------------------------------------------- -// CONSOLE However, it is recommended to define large synonyms set in a file using `synonyms_path`, because specifying them inline increases cluster size unnecessarily. @@ -139,7 +136,7 @@ However, it is recommended to define large synonyms set in a file using Synonyms based on http://wordnet.princeton.edu/[WordNet] format can be declared using `format`: -[source,js] +[source,console] -------------------------------------------------- PUT /test_index { @@ -162,7 +159,6 @@ PUT /test_index } } -------------------------------------------------- -// CONSOLE Using `synonyms_path` to define WordNet synonyms in a file is supported as well. diff --git a/docs/reference/analysis/tokenizers/chargroup-tokenizer.asciidoc b/docs/reference/analysis/tokenizers/chargroup-tokenizer.asciidoc index 1db4a4347ba..443ed3c2962 100644 --- a/docs/reference/analysis/tokenizers/chargroup-tokenizer.asciidoc +++ b/docs/reference/analysis/tokenizers/chargroup-tokenizer.asciidoc @@ -22,7 +22,7 @@ The `char_group` tokenizer accepts one parameter: [float] === Example output -[source,js] +[source,console] --------------------------- POST _analyze { @@ -37,7 +37,6 @@ POST _analyze "text": "The QUICK brown-fox" } --------------------------- -// CONSOLE returns diff --git a/docs/reference/analysis/tokenizers/classic-tokenizer.asciidoc b/docs/reference/analysis/tokenizers/classic-tokenizer.asciidoc index 83236b03eef..6405e5601b6 100644 --- a/docs/reference/analysis/tokenizers/classic-tokenizer.asciidoc +++ b/docs/reference/analysis/tokenizers/classic-tokenizer.asciidoc @@ -18,7 +18,7 @@ languages other than English: [float] === Example output -[source,js] +[source,console] --------------------------- POST _analyze { @@ -26,7 +26,6 @@ POST _analyze "text": "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone." } --------------------------- -// CONSOLE ///////////////////// @@ -142,7 +141,7 @@ The `classic` tokenizer accepts the following parameters: In this example, we configure the `classic` tokenizer to have a `max_token_length` of 5 (for demonstration purposes): -[source,js] +[source,console] ---------------------------- PUT my_index { @@ -169,7 +168,6 @@ POST my_index/_analyze "text": "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone." 
} ---------------------------- -// CONSOLE ///////////////////// diff --git a/docs/reference/analysis/tokenizers/edgengram-tokenizer.asciidoc b/docs/reference/analysis/tokenizers/edgengram-tokenizer.asciidoc index d0bd39c4739..bf414e509a6 100644 --- a/docs/reference/analysis/tokenizers/edgengram-tokenizer.asciidoc +++ b/docs/reference/analysis/tokenizers/edgengram-tokenizer.asciidoc @@ -21,7 +21,7 @@ With the default settings, the `edge_ngram` tokenizer treats the initial text as single token and produces N-grams with minimum length `1` and maximum length `2`: -[source,js] +[source,console] --------------------------- POST _analyze { @@ -29,7 +29,6 @@ POST _analyze "text": "Quick Fox" } --------------------------- -// CONSOLE ///////////////////// @@ -101,7 +100,7 @@ In this example, we configure the `edge_ngram` tokenizer to treat letters and digits as tokens, and to produce grams with minimum length `2` and maximum length `10`: -[source,js] +[source,console] ---------------------------- PUT my_index { @@ -133,7 +132,6 @@ POST my_index/_analyze "text": "2 Quick Foxes." } ---------------------------- -// CONSOLE ///////////////////// @@ -218,7 +216,7 @@ just search for the terms the user has typed in, for instance: `Quick Fo`. Below is an example of how to set up a field for _search-as-you-type_: -[source,js] +[source,console] ----------------------------------- PUT my_index { @@ -277,7 +275,6 @@ GET my_index/_search } } ----------------------------------- -// CONSOLE <1> The `autocomplete` analyzer indexes the terms `[qu, qui, quic, quick, fo, fox, foxe, foxes]`. <2> The `autocomplete_search` analyzer searches for the terms `[quick, fo]`, both of which appear in the index. diff --git a/docs/reference/analysis/tokenizers/keyword-tokenizer.asciidoc b/docs/reference/analysis/tokenizers/keyword-tokenizer.asciidoc index ff560904818..ebbb42baa5d 100644 --- a/docs/reference/analysis/tokenizers/keyword-tokenizer.asciidoc +++ b/docs/reference/analysis/tokenizers/keyword-tokenizer.asciidoc @@ -8,7 +8,7 @@ with token filters to normalise output, e.g. lower-casing email addresses. [float] === Example output -[source,js] +[source,console] --------------------------- POST _analyze { @@ -16,7 +16,6 @@ POST _analyze "text": "New York" } --------------------------- -// CONSOLE ///////////////////// diff --git a/docs/reference/analysis/tokenizers/letter-tokenizer.asciidoc b/docs/reference/analysis/tokenizers/letter-tokenizer.asciidoc index c21a913986d..84bf757b25b 100644 --- a/docs/reference/analysis/tokenizers/letter-tokenizer.asciidoc +++ b/docs/reference/analysis/tokenizers/letter-tokenizer.asciidoc @@ -9,7 +9,7 @@ not separated by spaces. [float] === Example output -[source,js] +[source,console] --------------------------- POST _analyze { @@ -17,7 +17,6 @@ POST _analyze "text": "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone." } --------------------------- -// CONSOLE ///////////////////// diff --git a/docs/reference/analysis/tokenizers/lowercase-tokenizer.asciidoc b/docs/reference/analysis/tokenizers/lowercase-tokenizer.asciidoc index b88d8d47e8d..927b1742ca6 100644 --- a/docs/reference/analysis/tokenizers/lowercase-tokenizer.asciidoc +++ b/docs/reference/analysis/tokenizers/lowercase-tokenizer.asciidoc @@ -14,7 +14,7 @@ efficient as it performs both steps in a single pass. [float] === Example output -[source,js] +[source,console] --------------------------- POST _analyze { @@ -22,7 +22,6 @@ POST _analyze "text": "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone." 
} --------------------------- -// CONSOLE ///////////////////// diff --git a/docs/reference/analysis/tokenizers/ngram-tokenizer.asciidoc b/docs/reference/analysis/tokenizers/ngram-tokenizer.asciidoc index 509f088b6a3..7a26304e455 100644 --- a/docs/reference/analysis/tokenizers/ngram-tokenizer.asciidoc +++ b/docs/reference/analysis/tokenizers/ngram-tokenizer.asciidoc @@ -17,7 +17,7 @@ With the default settings, the `ngram` tokenizer treats the initial text as a single token and produces N-grams with minimum length `1` and maximum length `2`: -[source,js] +[source,console] --------------------------- POST _analyze { @@ -25,7 +25,6 @@ POST _analyze "text": "Quick Fox" } --------------------------- -// CONSOLE ///////////////////// @@ -206,7 +205,7 @@ difference between `max_gram` and `min_gram`. In this example, we configure the `ngram` tokenizer to treat letters and digits as tokens, and to produce tri-grams (grams of length `3`): -[source,js] +[source,console] ---------------------------- PUT my_index { @@ -238,7 +237,6 @@ POST my_index/_analyze "text": "2 Quick Foxes." } ---------------------------- -// CONSOLE ///////////////////// diff --git a/docs/reference/analysis/tokenizers/pathhierarchy-tokenizer-examples.asciidoc b/docs/reference/analysis/tokenizers/pathhierarchy-tokenizer-examples.asciidoc index ee02d66e403..646554b7a39 100644 --- a/docs/reference/analysis/tokenizers/pathhierarchy-tokenizer-examples.asciidoc +++ b/docs/reference/analysis/tokenizers/pathhierarchy-tokenizer-examples.asciidoc @@ -14,7 +14,7 @@ Some sample documents are then indexed to represent some file paths for photos inside photo folders of two different users. -[source,js] +[source,console] -------------------------------------------------- PUT file-path-test { @@ -85,7 +85,6 @@ POST file-path-test/_doc/5 "file_path": "/User/bob/photos/2017/05/16/my_photo1.jpg" } -------------------------------------------------- -// CONSOLE // TESTSETUP @@ -94,7 +93,7 @@ the example documents, with Bob's documents ranking highest due to `bob` also being one of the terms created by the standard analyzer boosting relevance for Bob's documents. -[source,js] +[source,console] -------------------------------------------------- GET file-path-test/_search { @@ -105,13 +104,11 @@ GET file-path-test/_search } } -------------------------------------------------- -// CONSOLE - It's simple to match or filter documents with file paths that exist within a particular directory using the `file_path.tree` field. -[source,js] +[source,console] -------------------------------------------------- GET file-path-test/_search { @@ -122,7 +119,6 @@ GET file-path-test/_search } } -------------------------------------------------- -// CONSOLE With the reverse parameter for this tokenizer, it's also possible to match from the other end of the file path, such as individual file names or a deep @@ -131,7 +127,7 @@ level subdirectory. The following example shows a search for all files named configured to use the reverse parameter in the mapping. -[source,js] +[source,console] -------------------------------------------------- GET file-path-test/_search { @@ -144,14 +140,12 @@ GET file-path-test/_search } } -------------------------------------------------- -// CONSOLE - Viewing the tokens generated with both forward and reverse is instructive in showing the tokens created for the same file path value. 
-[source,js] +[source,console] -------------------------------------------------- POST file-path-test/_analyze { @@ -165,14 +159,13 @@ POST file-path-test/_analyze "text": "/User/alice/photos/2017/05/16/my_photo1.jpg" } -------------------------------------------------- -// CONSOLE It's also useful to be able to filter with file paths when combined with other types of searches, such as this example looking for any files paths with `16` that also must be in Alice's photo directory. -[source,js] +[source,console] -------------------------------------------------- GET file-path-test/_search { @@ -188,4 +181,3 @@ GET file-path-test/_search } } -------------------------------------------------- -// CONSOLE diff --git a/docs/reference/analysis/tokenizers/pathhierarchy-tokenizer.asciidoc b/docs/reference/analysis/tokenizers/pathhierarchy-tokenizer.asciidoc index d5cba041308..e431f77c653 100644 --- a/docs/reference/analysis/tokenizers/pathhierarchy-tokenizer.asciidoc +++ b/docs/reference/analysis/tokenizers/pathhierarchy-tokenizer.asciidoc @@ -8,7 +8,7 @@ tree. [float] === Example output -[source,js] +[source,console] --------------------------- POST _analyze { @@ -16,7 +16,6 @@ POST _analyze "text": "/one/two/three" } --------------------------- -// CONSOLE ///////////////////// @@ -90,7 +89,7 @@ The `path_hierarchy` tokenizer accepts the following parameters: In this example, we configure the `path_hierarchy` tokenizer to split on `-` characters, and to replace them with `/`. The first two tokens are skipped: -[source,js] +[source,console] ---------------------------- PUT my_index { @@ -119,7 +118,6 @@ POST my_index/_analyze "text": "one-two-three-four-five" } ---------------------------- -// CONSOLE ///////////////////// diff --git a/docs/reference/analysis/tokenizers/pattern-tokenizer.asciidoc b/docs/reference/analysis/tokenizers/pattern-tokenizer.asciidoc index 488e3209ef0..c1f49e4da22 100644 --- a/docs/reference/analysis/tokenizers/pattern-tokenizer.asciidoc +++ b/docs/reference/analysis/tokenizers/pattern-tokenizer.asciidoc @@ -25,7 +25,7 @@ Read more about http://www.regular-expressions.info/catastrophic.html[pathologic [float] === Example output -[source,js] +[source,console] --------------------------- POST _analyze { @@ -33,7 +33,6 @@ POST _analyze "text": "The foo_bar_size's default is 5." 
} --------------------------- -// CONSOLE ///////////////////// @@ -122,7 +121,7 @@ The `pattern` tokenizer accepts the following parameters: In this example, we configure the `pattern` tokenizer to break text into tokens when it encounters commas: -[source,js] +[source,console] ---------------------------- PUT my_index { @@ -149,7 +148,6 @@ POST my_index/_analyze "text": "comma,separated,values" } ---------------------------- -// CONSOLE ///////////////////// @@ -211,7 +209,7 @@ escaped, so the pattern ends up looking like: \"((?:\\\\\"|[^\"]|\\\\\")+)\" -[source,js] +[source,console] ---------------------------- PUT my_index { @@ -239,7 +237,6 @@ POST my_index/_analyze "text": "\"value\", \"value with embedded \\\" quote\"" } ---------------------------- -// CONSOLE ///////////////////// diff --git a/docs/reference/analysis/tokenizers/simplepattern-tokenizer.asciidoc b/docs/reference/analysis/tokenizers/simplepattern-tokenizer.asciidoc index 5e6a4289398..5da001640a0 100644 --- a/docs/reference/analysis/tokenizers/simplepattern-tokenizer.asciidoc +++ b/docs/reference/analysis/tokenizers/simplepattern-tokenizer.asciidoc @@ -34,7 +34,7 @@ The `simple_pattern` tokenizer accepts the following parameters: This example configures the `simple_pattern` tokenizer to produce terms that are three-digit numbers -[source,js] +[source,console] ---------------------------- PUT my_index { @@ -61,7 +61,6 @@ POST my_index/_analyze "text": "fd-786-335-514-x" } ---------------------------- -// CONSOLE ///////////////////// diff --git a/docs/reference/analysis/tokenizers/simplepatternsplit-tokenizer.asciidoc b/docs/reference/analysis/tokenizers/simplepatternsplit-tokenizer.asciidoc index 21d7e1b8ced..55be14c4563 100644 --- a/docs/reference/analysis/tokenizers/simplepatternsplit-tokenizer.asciidoc +++ b/docs/reference/analysis/tokenizers/simplepatternsplit-tokenizer.asciidoc @@ -35,7 +35,7 @@ The `simple_pattern_split` tokenizer accepts the following parameters: This example configures the `simple_pattern_split` tokenizer to split the input text on underscores. -[source,js] +[source,console] ---------------------------- PUT my_index { @@ -62,7 +62,6 @@ POST my_index/_analyze "text": "an_underscored_phrase" } ---------------------------- -// CONSOLE ///////////////////// diff --git a/docs/reference/analysis/tokenizers/standard-tokenizer.asciidoc b/docs/reference/analysis/tokenizers/standard-tokenizer.asciidoc index 5156bc32470..0db5cc1186b 100644 --- a/docs/reference/analysis/tokenizers/standard-tokenizer.asciidoc +++ b/docs/reference/analysis/tokenizers/standard-tokenizer.asciidoc @@ -9,7 +9,7 @@ for most languages. [float] === Example output -[source,js] +[source,console] --------------------------- POST _analyze { @@ -17,7 +17,6 @@ POST _analyze "text": "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone." } --------------------------- -// CONSOLE ///////////////////// @@ -133,7 +132,7 @@ The `standard` tokenizer accepts the following parameters: In this example, we configure the `standard` tokenizer to have a `max_token_length` of 5 (for demonstration purposes): -[source,js] +[source,console] ---------------------------- PUT my_index { @@ -160,7 +159,6 @@ POST my_index/_analyze "text": "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone." 
} ---------------------------- -// CONSOLE ///////////////////// diff --git a/docs/reference/analysis/tokenizers/thai-tokenizer.asciidoc b/docs/reference/analysis/tokenizers/thai-tokenizer.asciidoc index ee680f20897..4c6298cc67b 100644 --- a/docs/reference/analysis/tokenizers/thai-tokenizer.asciidoc +++ b/docs/reference/analysis/tokenizers/thai-tokenizer.asciidoc @@ -13,7 +13,7 @@ consider using the {plugins}/analysis-icu-tokenizer.html[ICU Tokenizer] instead. [float] === Example output -[source,js] +[source,console] --------------------------- POST _analyze { @@ -21,7 +21,6 @@ POST _analyze "text": "การที่ได้ต้องแสดงว่างานดี" } --------------------------- -// CONSOLE ///////////////////// diff --git a/docs/reference/analysis/tokenizers/uaxurlemail-tokenizer.asciidoc b/docs/reference/analysis/tokenizers/uaxurlemail-tokenizer.asciidoc index b474c396572..7bb28e112e5 100644 --- a/docs/reference/analysis/tokenizers/uaxurlemail-tokenizer.asciidoc +++ b/docs/reference/analysis/tokenizers/uaxurlemail-tokenizer.asciidoc @@ -7,7 +7,7 @@ recognises URLs and email addresses as single tokens. [float] === Example output -[source,js] +[source,console] --------------------------- POST _analyze { @@ -15,7 +15,6 @@ POST _analyze "text": "Email me at john.smith@global-international.com" } --------------------------- -// CONSOLE ///////////////////// @@ -89,7 +88,7 @@ The `uax_url_email` tokenizer accepts the following parameters: In this example, we configure the `uax_url_email` tokenizer to have a `max_token_length` of 5 (for demonstration purposes): -[source,js] +[source,console] ---------------------------- PUT my_index { @@ -116,7 +115,6 @@ POST my_index/_analyze "text": "john.smith@global-international.com" } ---------------------------- -// CONSOLE ///////////////////// diff --git a/docs/reference/analysis/tokenizers/whitespace-tokenizer.asciidoc b/docs/reference/analysis/tokenizers/whitespace-tokenizer.asciidoc index f1595b090d7..8d69f6ecc92 100644 --- a/docs/reference/analysis/tokenizers/whitespace-tokenizer.asciidoc +++ b/docs/reference/analysis/tokenizers/whitespace-tokenizer.asciidoc @@ -7,7 +7,7 @@ whitespace character. [float] === Example output -[source,js] +[source,console] --------------------------- POST _analyze { @@ -15,7 +15,6 @@ POST _analyze "text": "The 2 QUICK Brown-Foxes jumped over the lazy dog's bone." } --------------------------- -// CONSOLE ///////////////////// diff --git a/docs/reference/api-conventions.asciidoc b/docs/reference/api-conventions.asciidoc index 7e633499a8c..699f4bf3d68 100644 --- a/docs/reference/api-conventions.asciidoc +++ b/docs/reference/api-conventions.asciidoc @@ -70,7 +70,7 @@ calendars than the Gregorian calendar. You must enclose date math index name expressions within angle brackets, and all special characters should be URI encoded. For example: -[source,js] +[source,console] ---------------------------------------------------------------------- # GET //_search GET /%3Clogstash-%7Bnow%2Fd%7D%3E/_search @@ -82,7 +82,6 @@ GET /%3Clogstash-%7Bnow%2Fd%7D%3E/_search } } ---------------------------------------------------------------------- -// CONSOLE // TEST[s/^/PUT logstash-2016.09.20\n/] // TEST[s/now/2016.09.20||/] @@ -125,7 +124,7 @@ The following example shows a search request that searches the Logstash indices three days, assuming the indices use the default Logstash index name format, `logstash-yyyy.MM.dd`. 
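The URL-encoded index name in the date math example above, `%3Clogstash-%7Bnow%2Fd%7D%3E`, is simply the percent-encoding of the date math expression `<logstash-{now/d}>`. A quick check with the Python standard library (the expression itself is taken from that example):

[source,python]
--------------------------------------------------
from urllib.parse import quote, unquote

# Percent-encode a date math index name for use in a request path, as the
# api-conventions example above does by hand.
expr = "<logstash-{now/d}>"
print(quote(expr, safe=""))                      # %3Clogstash-%7Bnow%2Fd%7D%3E
print(unquote("%3Clogstash-%7Bnow%2Fd%7D%3E"))   # <logstash-{now/d}>
--------------------------------------------------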
-[source,js] +[source,console] ---------------------------------------------------------------------- # GET /,,/_search GET /%3Clogstash-%7Bnow%2Fd-2d%7D%3E%2C%3Clogstash-%7Bnow%2Fd-1d%7D%3E%2C%3Clogstash-%7Bnow%2Fd%7D%3E/_search @@ -137,7 +136,6 @@ GET /%3Clogstash-%7Bnow%2Fd-2d%7D%3E%2C%3Clogstash-%7Bnow%2Fd-1d%7D%3E%2C%3Clogs } } ---------------------------------------------------------------------- -// CONSOLE // TEST[s/^/PUT logstash-2016.09.20\nPUT logstash-2016.09.19\nPUT logstash-2016.09.18\n/] // TEST[s/now/2016.09.20||/] @@ -213,11 +211,10 @@ All REST APIs accept a `filter_path` parameter that can be used to reduce the response returned by Elasticsearch. This parameter takes a comma separated list of filters expressed with the dot notation: -[source,js] +[source,console] -------------------------------------------------- GET /_search?q=elasticsearch&filter_path=took,hits.hits._id,hits.hits._score -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] Responds: @@ -242,11 +239,10 @@ Responds: It also supports the `*` wildcard character to match any field or part of a field's name: -[source,sh] +[source,console] -------------------------------------------------- GET /_cluster/state?filter_path=metadata.indices.*.stat* -------------------------------------------------- -// CONSOLE // TEST[s/^/PUT twitter\n/] Responds: @@ -266,11 +262,10 @@ And the `**` wildcard can be used to include fields without knowing the exact path of the field. For example, we can return the Lucene version of every segment with this request: -[source,js] +[source,console] -------------------------------------------------- GET /_cluster/state?filter_path=routing_table.indices.**.state -------------------------------------------------- -// CONSOLE // TEST[s/^/PUT twitter\n/] Responds: @@ -292,11 +287,10 @@ Responds: It is also possible to exclude one or more fields by prefixing the filter with the char `-`: -[source,js] +[source,console] -------------------------------------------------- GET /_count?filter_path=-_shards -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] Responds: @@ -312,11 +306,10 @@ And for more control, both inclusive and exclusive filters can be combined in th this case, the exclusive filters will be applied first and the result will be filtered again using the inclusive filters: -[source,js] +[source,console] -------------------------------------------------- GET /_cluster/state?filter_path=metadata.indices.*.state,-metadata.indices.logstash-* -------------------------------------------------- -// CONSOLE // TEST[s/^/PUT index-1\nPUT index-2\nPUT index-3\nPUT logstash-2016.01\n/] Responds: @@ -340,7 +333,7 @@ consider combining the already existing `_source` parameter (see <> for more details) with the `filter_path` parameter like this: -[source,js] +[source,console] -------------------------------------------------- POST /library/book?refresh {"title": "Book #1", "rating": 200.1} @@ -350,7 +343,6 @@ POST /library/book?refresh {"title": "Book #3", "rating": 0.1} GET /_search?filter_path=hits.hits._source&_source=title&sort=rating:desc -------------------------------------------------- -// CONSOLE [source,console-result] -------------------------------------------------- @@ -374,11 +366,10 @@ GET /_search?filter_path=hits.hits._source&_source=title&sort=rating:desc The `flat_settings` flag affects rendering of the lists of settings. 
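The `filter_path` hunks above describe pruning a response down to a comma-separated list of dotted paths. A heavily simplified Python sketch of that pruning (a single path, no `*` or `**` wildcards, and not Elasticsearch's actual implementation) shows what `hits.hits._id` keeps from a response:

[source,python]
--------------------------------------------------
# Heavily simplified illustration of filter_path-style pruning: keep only one
# dotted path, with no wildcard support. Not how Elasticsearch implements it.
def keep_path(node, keys):
    if not keys:
        return node
    if isinstance(node, list):
        kept = [keep_path(item, keys) for item in node]
        return [k for k in kept if k not in (None, {}, [])]
    if isinstance(node, dict) and keys[0] in node:
        child = keep_path(node[keys[0]], keys[1:])
        if child not in (None, {}, []):
            return {keys[0]: child}
    return None

response = {"took": 3, "hits": {"total": {"value": 1},
                                "hits": [{"_id": "0", "_score": 1.6, "_source": {}}]}}
print(keep_path(response, "hits.hits._id".split(".")))
# {'hits': {'hits': [{'_id': '0'}]}}
--------------------------------------------------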
When the `flat_settings` flag is `true`, settings are returned in a flat format: -[source,js] +[source,console] -------------------------------------------------- GET twitter/_settings?flat_settings=true -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] Returns: @@ -405,11 +396,10 @@ Returns: When the `flat_settings` flag is `false`, settings are returned in a more human readable structured format: -[source,js] +[source,console] -------------------------------------------------- GET twitter/_settings?flat_settings=false -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] Returns: @@ -570,11 +560,10 @@ stack trace of the error. You can enable that behavior by setting the `error_trace` url parameter to `true`. For example, by default when you send an invalid `size` parameter to the `_search` API: -[source,js] +[source,console] ---------------------------------------------------------------------- POST /twitter/_search?size=surprise_me ---------------------------------------------------------------------- -// CONSOLE // TEST[s/surprise_me/surprise_me&error_trace=false/ catch:bad_request] // Since the test system sends error_trace=true by default we have to override @@ -603,11 +592,10 @@ The response looks like: But if you set `error_trace=true`: -[source,js] +[source,console] ---------------------------------------------------------------------- POST /twitter/_search?size=surprise_me&error_trace=true ---------------------------------------------------------------------- -// CONSOLE // TEST[catch:bad_request] The response looks like: diff --git a/docs/reference/cat.asciidoc b/docs/reference/cat.asciidoc index 20e6a53a612..25cf92a3467 100644 --- a/docs/reference/cat.asciidoc +++ b/docs/reference/cat.asciidoc @@ -24,11 +24,10 @@ the available commands. Each of the commands accepts a query string parameter `v` to turn on verbose output. For example: -[source,js] +[source,console] -------------------------------------------------- GET /_cat/master?v -------------------------------------------------- -// CONSOLE Might respond with: @@ -46,11 +45,10 @@ u_n93zwxThWHi1PDBJAGAg 127.0.0.1 127.0.0.1 u_n93zw Each of the commands accepts a query string parameter `help` which will output its available columns. For example: -[source,js] +[source,console] -------------------------------------------------- GET /_cat/master?help -------------------------------------------------- -// CONSOLE Might respond with: @@ -75,11 +73,10 @@ instead. Each of the commands accepts a query string parameter `h` which forces only those columns to appear. 
For example: -[source,js] +[source,console] -------------------------------------------------- GET /_cat/nodes?h=ip,port,heapPercent,name -------------------------------------------------- -// CONSOLE Responds with: diff --git a/docs/reference/cat/alias.asciidoc b/docs/reference/cat/alias.asciidoc index ffb6341a565..60f2d8ed9cb 100644 --- a/docs/reference/cat/alias.asciidoc +++ b/docs/reference/cat/alias.asciidoc @@ -42,7 +42,7 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-v] //// Hidden setup for example: -[source,js] +[source,console] -------------------------------------------------- PUT test1 { @@ -65,14 +65,12 @@ PUT test1 } } -------------------------------------------------- -// CONSOLE //// -[source,js] +[source,console] -------------------------------------------------- GET /_cat/aliases?v -------------------------------------------------- -// CONSOLE // TEST[continued] The API returns the following response: diff --git a/docs/reference/cat/allocation.asciidoc b/docs/reference/cat/allocation.asciidoc index a02e4ba24d5..c089d3a855f 100644 --- a/docs/reference/cat/allocation.asciidoc +++ b/docs/reference/cat/allocation.asciidoc @@ -41,11 +41,10 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-v] [[cat-allocation-api-example]] ==== {api-examples-title} -[source,js] +[source,console] -------------------------------------------------- GET /_cat/allocation?v -------------------------------------------------- -// CONSOLE // TEST[s/^/PUT test\n{"settings": {"number_of_replicas": 0}}\n/] The API returns the following response: diff --git a/docs/reference/cat/count.asciidoc b/docs/reference/cat/count.asciidoc index f10e758a4c8..e38151ce4b5 100644 --- a/docs/reference/cat/count.asciidoc +++ b/docs/reference/cat/count.asciidoc @@ -50,11 +50,10 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-v] The following `count` API request retrieves the document count of a single index, `twitter`. -[source,js] +[source,console] -------------------------------------------------- GET /_cat/count/twitter?v -------------------------------------------------- -// CONSOLE // TEST[setup:big_twitter] @@ -73,11 +72,10 @@ epoch timestamp count The following `count` API request retrieves the document count of all indices in the cluster. -[source,js] +[source,console] -------------------------------------------------- GET /_cat/count?v -------------------------------------------------- -// CONSOLE // TEST[setup:big_twitter] // TEST[s/^/POST test\/test\?refresh\n{"test": "test"}\n/] diff --git a/docs/reference/cat/fielddata.asciidoc b/docs/reference/cat/fielddata.asciidoc index e261e6919eb..f63462f8e04 100644 --- a/docs/reference/cat/fielddata.asciidoc +++ b/docs/reference/cat/fielddata.asciidoc @@ -47,7 +47,8 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-v] //// Hidden setup snippet to build an index with fielddata so our results are real: -[source,js] + +[source,console] -------------------------------------------------- PUT test { @@ -78,7 +79,6 @@ POST test/_doc?refresh # Perform a search to load the field data POST test/_search?sort=body,soul,mind -------------------------------------------------- -// CONSOLE //// [[cat-fielddata-api-example-ind]] @@ -88,11 +88,10 @@ You can specify an individual field in the request body or URL path. The following `fieldata` API request retrieves heap memory size information for the `body` field. 
-[source,js] +[source,console] -------------------------------------------------- GET /_cat/fielddata?v&fields=body -------------------------------------------------- -// CONSOLE // TEST[continued] The API returns the following response: @@ -113,11 +112,10 @@ path. The following `fieldata` API request retrieves heap memory size information for the `body` and `soul` fields. -[source,js] +[source,console] -------------------------------------------------- GET /_cat/fielddata/body,soul?v -------------------------------------------------- -// CONSOLE // TEST[continued] The API returns the following response: @@ -140,11 +138,10 @@ one row per field per node. The following `fieldata` API request retrieves heap memory size information all fields. -[source,js] +[source,console] -------------------------------------------------- GET /_cat/fielddata?v -------------------------------------------------- -// CONSOLE // TEST[continued] The API returns the following response: diff --git a/docs/reference/cat/health.asciidoc b/docs/reference/cat/health.asciidoc index bea8d6b695d..803f564d2c6 100644 --- a/docs/reference/cat/health.asciidoc +++ b/docs/reference/cat/health.asciidoc @@ -67,11 +67,10 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-v] By default, the cat health API returns `HH:MM:SS` and https://en.wikipedia.org/wiki/Unix_time[Unix `epoch`] timestamps. For example: -[source,js] +[source,console] -------------------------------------------------- GET /_cat/health?v -------------------------------------------------- -// CONSOLE // TEST[s/^/PUT twitter\n{"settings":{"number_of_replicas": 0}}\n/] The API returns the following response: @@ -88,11 +87,10 @@ epoch timestamp cluster status node.total node.data shards pri relo i ===== Example without a timestamp You can use the `ts` (timestamps) parameter to disable timestamps. 
For example: -[source,js] +[source,console] -------------------------------------------------- GET /_cat/health?v&ts=false -------------------------------------------------- -// CONSOLE // TEST[s/^/PUT twitter\n{"settings":{"number_of_replicas": 0}}\n/] The API returns the following response: diff --git a/docs/reference/cat/indices.asciidoc b/docs/reference/cat/indices.asciidoc index 9a228fd3254..083f8738430 100644 --- a/docs/reference/cat/indices.asciidoc +++ b/docs/reference/cat/indices.asciidoc @@ -84,11 +84,10 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-v] ==== {api-examples-title} [[examples]] -[source,js] +[source,console] -------------------------------------------------- GET /_cat/indices/twi*?v&s=index -------------------------------------------------- -// CONSOLE // TEST[setup:huge_twitter] // TEST[s/^/PUT twitter2\n{"settings": {"number_of_replicas": 0}}\n/] diff --git a/docs/reference/cat/master.asciidoc b/docs/reference/cat/master.asciidoc index 84150d0fcf8..b1263297ee6 100644 --- a/docs/reference/cat/master.asciidoc +++ b/docs/reference/cat/master.asciidoc @@ -35,11 +35,10 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-v] [[cat-master-api-example]] ==== {api-examples-title} -[source,js] +[source,console] -------------------------------------------------- GET /_cat/master?v -------------------------------------------------- -// CONSOLE The API returns the following response: diff --git a/docs/reference/cat/nodeattrs.asciidoc b/docs/reference/cat/nodeattrs.asciidoc index 4fb6a42c09b..6b916ca1660 100644 --- a/docs/reference/cat/nodeattrs.asciidoc +++ b/docs/reference/cat/nodeattrs.asciidoc @@ -65,11 +65,10 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-v] [[cat-nodeattrs-api-ex-default]] ===== Example with default columns -[source,js] +[source,console] -------------------------------------------------- GET /_cat/nodeattrs?v -------------------------------------------------- -// CONSOLE // TEST[s/\?v/\?v&s=node,attr/] // Sort the resulting attributes so we can assert on them more easily @@ -97,11 +96,10 @@ The `attr` and `value` columns return custom node attributes, one per line. The following API request returns the `name`, `pid`, `attr`, and `value` columns. -[source,js] +[source,console] -------------------------------------------------- GET /_cat/nodeattrs?v&h=name,pid,attr,value -------------------------------------------------- -// CONSOLE // TEST[s/,value/,value&s=node,attr/] // Sort the resulting attributes so we can assert on them more easily diff --git a/docs/reference/cat/nodes.asciidoc b/docs/reference/cat/nodes.asciidoc index 7af6ab1c05a..adcf94f0ca7 100644 --- a/docs/reference/cat/nodes.asciidoc +++ b/docs/reference/cat/nodes.asciidoc @@ -296,11 +296,10 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-v] [[cat-nodes-api-ex-default]] ===== Example with default columns -[source,js] +[source,console] -------------------------------------------------- GET /_cat/nodes?v -------------------------------------------------- -// CONSOLE The API returns the following response: @@ -325,11 +324,10 @@ monitoring an entire cluster, particularly large ones. The following API request returns the `id`, `ip`, `port`, `v` (version), and `m` (master) columns. 
-[source,js] +[source,console] -------------------------------------------------- GET /_cat/nodes?v&h=id,ip,port,v,m -------------------------------------------------- -// CONSOLE The API returns the following response: diff --git a/docs/reference/cat/pending_tasks.asciidoc b/docs/reference/cat/pending_tasks.asciidoc index 290268bfac8..d6c0cfefdde 100644 --- a/docs/reference/cat/pending_tasks.asciidoc +++ b/docs/reference/cat/pending_tasks.asciidoc @@ -33,11 +33,10 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-v] [[cat-pending-tasks-api-example]] ==== {api-examples-title} -[source,js] +[source,console] -------------------------------------------------- GET /_cat/pending_tasks?v -------------------------------------------------- -// CONSOLE The API returns the following response: diff --git a/docs/reference/cat/plugins.asciidoc b/docs/reference/cat/plugins.asciidoc index fcc562d75b6..981b717527c 100644 --- a/docs/reference/cat/plugins.asciidoc +++ b/docs/reference/cat/plugins.asciidoc @@ -34,11 +34,10 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-v] [[cat-plugins-api-example]] ==== {api-examples-title} -[source,js] +[source,console] ------------------------------------------------------------------------------ GET /_cat/plugins?v&s=component&h=name,component,version,description ------------------------------------------------------------------------------ -// CONSOLE The API returns the following response: diff --git a/docs/reference/cat/recovery.asciidoc b/docs/reference/cat/recovery.asciidoc index 374cb4cd3bf..d77a7301b1c 100644 --- a/docs/reference/cat/recovery.asciidoc +++ b/docs/reference/cat/recovery.asciidoc @@ -60,11 +60,10 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-v] [[cat-recovery-api-ex-dead]] ===== Example with no ongoing recoveries -[source,js] +[source,console] ---------------------------------------------------------------------------- GET _cat/recovery?v ---------------------------------------------------------------------------- -// CONSOLE // TEST[setup:twitter] The API returns the following response: @@ -90,11 +89,10 @@ start. By increasing the replica count of an index and bringing another node online to host the replicas, you can retrieve information about an ongoing recovery. -[source,js] +[source,console] ---------------------------------------------------------------------------- GET _cat/recovery?v&h=i,s,t,ty,st,shost,thost,f,fp,b,bp ---------------------------------------------------------------------------- -// CONSOLE // TEST[setup:twitter] The API returns the following response: @@ -121,11 +119,10 @@ You can restore backups of an index using the <> API. You can use the cat recovery API to retrieve information about a snapshot recovery.
-[source,js] +[source,console] -------------------------------------------------------------------------------- GET _cat/recovery?v&h=i,s,t,ty,st,rep,snap,f,fp,b,bp -------------------------------------------------------------------------------- -// CONSOLE // TEST[skip:no need to execute snapshot/restore here] The API returns the following response with a recovery type of `snapshot`: diff --git a/docs/reference/cat/repositories.asciidoc b/docs/reference/cat/repositories.asciidoc index 9bfa56a7c72..750320f95f5 100644 --- a/docs/reference/cat/repositories.asciidoc +++ b/docs/reference/cat/repositories.asciidoc @@ -34,11 +34,10 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-v] [[cat-repositories-api-example]] ==== {api-examples-title} -[source,js] +[source,console] -------------------------------------------------- GET /_cat/repositories?v -------------------------------------------------- -// CONSOLE // TEST[s/^/PUT \/_snapshot\/repo1\n{"type": "fs", "settings": {"location": "repo\/1"}}\n/] The API returns the following response: diff --git a/docs/reference/cat/segments.asciidoc b/docs/reference/cat/segments.asciidoc index 59fefaa309b..4ed14082392 100644 --- a/docs/reference/cat/segments.asciidoc +++ b/docs/reference/cat/segments.asciidoc @@ -102,11 +102,10 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-v] [[cat-segments-api-example]] ==== {api-examples-title} -[source,js] +[source,console] -------------------------------------------------- GET /_cat/segments?v -------------------------------------------------- -// CONSOLE // TEST[s/^/PUT \/test\/test\/1?refresh\n{"test":"test"}\nPUT \/test1\/test\/1?refresh\n{"test":"test"}\n/] The API returns the following response: diff --git a/docs/reference/cat/shards.asciidoc b/docs/reference/cat/shards.asciidoc index f4418b80779..ee6a5637402 100644 --- a/docs/reference/cat/shards.asciidoc +++ b/docs/reference/cat/shards.asciidoc @@ -278,11 +278,10 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-v] [[cat-shards-api-example-single]] ===== Example with a single index -[source,js] +[source,console] --------------------------------------------------------------------------- GET _cat/shards --------------------------------------------------------------------------- -// CONSOLE // TEST[setup:twitter] The API returns the following response: @@ -305,11 +304,10 @@ path parameter to limit the API request. The following request returns information for any indices beginning with `twitt`. -[source,js] +[source,console] --------------------------------------------------------------------------- GET _cat/shards/twitt* --------------------------------------------------------------------------- -// CONSOLE // TEST[setup:twitter] The API returns the following response: @@ -327,11 +325,10 @@ twitter 0 p STARTED 3014 31.1mb 192.168.56.10 H5dfFeA [[relocation]] ===== Example with a relocating shard -[source,js] +[source,console] --------------------------------------------------------------------------- GET _cat/shards --------------------------------------------------------------------------- -// CONSOLE // TEST[skip:for now, relocation cannot be recreated] The API returns the following response: @@ -351,11 +348,10 @@ relocating. Before a shard is available for use, it goes through an `INITIALIZING` state. You can use the cat shards API to see which shards are initializing. 
-[source,js] +[source,console] --------------------------------------------------------------------------- GET _cat/shards --------------------------------------------------------------------------- -// CONSOLE // TEST[skip:there is no guarantee to test for shards in initializing state] The API returns the following response: @@ -373,11 +369,10 @@ The following request returns the `unassigned.reason` column, which indicates why a shard is unassigned. -[source,js] +[source,console] --------------------------------------------------------------------------- GET _cat/shards?h=index,shard,prirep,state,unassigned.reason --------------------------------------------------------------------------- -// CONSOLE // TEST[skip:for now] The API returns the following response: diff --git a/docs/reference/cat/snapshots.asciidoc b/docs/reference/cat/snapshots.asciidoc index 70a0a4d082b..e66b1b66127 100644 --- a/docs/reference/cat/snapshots.asciidoc +++ b/docs/reference/cat/snapshots.asciidoc @@ -109,11 +109,10 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-v] [[cat-snapshots-api-example]] ==== {api-examples-title} -[source,js] +[source,console] -------------------------------------------------- GET /_cat/snapshots/repo1?v&s=id -------------------------------------------------- -// CONSOLE // TEST[s/^/PUT \/_snapshot\/repo1\/snap1?wait_for_completion=true\n/] // TEST[s/^/PUT \/_snapshot\/repo1\/snap2?wait_for_completion=true\n/] // TEST[s/^/PUT \/_snapshot\/repo1\n{"type": "fs", "settings": {"location": "repo\/1"}}\n/] diff --git a/docs/reference/cat/templates.asciidoc b/docs/reference/cat/templates.asciidoc index 96c28aae438..d9a552c9f85 100644 --- a/docs/reference/cat/templates.asciidoc +++ b/docs/reference/cat/templates.asciidoc @@ -44,11 +44,10 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-v] [[cat-templates-api-example]] ==== {api-examples-title} -[source,js] +[source,console] -------------------------------------------------- GET /_cat/templates?v&s=name -------------------------------------------------- -// CONSOLE // TEST[s/templates/templates\/template*/] // TEST[s/^/PUT _template\/template0\n{"index_patterns": "te*", "order": 0}\n/] // TEST[s/^/PUT _template\/template1\n{"index_patterns": "tea*", "order": 1}\n/] diff --git a/docs/reference/cat/thread_pool.asciidoc b/docs/reference/cat/thread_pool.asciidoc index cd7dae666be..7b49874536e 100644 --- a/docs/reference/cat/thread_pool.asciidoc +++ b/docs/reference/cat/thread_pool.asciidoc @@ -118,11 +118,10 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-v] [[cat-thread-pool-api-ex-default]] ===== Example with default columns -[source,js] +[source,console] -------------------------------------------------- GET /_cat/thread_pool -------------------------------------------------- -// CONSOLE The API returns the following response: @@ -155,11 +154,10 @@ The following API request returns the `id`, `name`, `active`, `rejected`, and `completed` columns. The request limits returned information to the `generic` thread pool. 
-[source,js] +[source,console] -------------------------------------------------- GET /_cat/thread_pool/generic?v&h=id,name,active,rejected,completed -------------------------------------------------- -// CONSOLE The API returns the following response: diff --git a/docs/reference/ccr/apis/auto-follow/delete-auto-follow-pattern.asciidoc b/docs/reference/ccr/apis/auto-follow/delete-auto-follow-pattern.asciidoc index 4bbf9cbda60..2fbd553ad6b 100644 --- a/docs/reference/ccr/apis/auto-follow/delete-auto-follow-pattern.asciidoc +++ b/docs/reference/ccr/apis/auto-follow/delete-auto-follow-pattern.asciidoc @@ -13,7 +13,7 @@ Delete auto-follow patterns. ////////////////////////// -[source,js] +[source,console] -------------------------------------------------- PUT /_ccr/auto_follow/my_auto_follow_pattern { @@ -25,17 +25,15 @@ PUT /_ccr/auto_follow/my_auto_follow_pattern "follow_index_pattern" : "{{leader_index}}-follower" } -------------------------------------------------- -// CONSOLE // TEST[setup:remote_cluster] // TESTSETUP ////////////////////////// -[source,js] +[source,console] -------------------------------------------------- DELETE /_ccr/auto_follow/ -------------------------------------------------- -// CONSOLE // TEST[s//my_auto_follow_pattern/] [[ccr-delete-auto-follow-pattern-prereqs]] @@ -64,11 +62,10 @@ This API deletes a configured collection of This example deletes an auto-follow pattern collection named `my_auto_follow_pattern`: -[source,js] +[source,console] -------------------------------------------------- DELETE /_ccr/auto_follow/my_auto_follow_pattern -------------------------------------------------- -// CONSOLE // TEST[setup:remote_cluster] The API returns the following result: diff --git a/docs/reference/ccr/apis/auto-follow/get-auto-follow-pattern.asciidoc b/docs/reference/ccr/apis/auto-follow/get-auto-follow-pattern.asciidoc index f621c19564a..dfee7f58673 100644 --- a/docs/reference/ccr/apis/auto-follow/get-auto-follow-pattern.asciidoc +++ b/docs/reference/ccr/apis/auto-follow/get-auto-follow-pattern.asciidoc @@ -13,7 +13,7 @@ Get auto-follow patterns. ////////////////////////// -[source,js] +[source,console] -------------------------------------------------- PUT /_ccr/auto_follow/my_auto_follow_pattern { @@ -25,31 +25,27 @@ PUT /_ccr/auto_follow/my_auto_follow_pattern "follow_index_pattern" : "{{leader_index}}-follower" } -------------------------------------------------- -// CONSOLE // TEST[setup:remote_cluster] // TESTSETUP -[source,js] +[source,console] -------------------------------------------------- DELETE /_ccr/auto_follow/my_auto_follow_pattern -------------------------------------------------- -// CONSOLE // TEST // TEARDOWN ////////////////////////// -[source,js] +[source,console] -------------------------------------------------- GET /_ccr/auto_follow/ -------------------------------------------------- -// CONSOLE -[source,js] +[source,console] -------------------------------------------------- GET /_ccr/auto_follow/ -------------------------------------------------- -// CONSOLE // TEST[s//my_auto_follow_pattern/] [[ccr-get-auto-follow-pattern-prereqs]] @@ -79,11 +75,10 @@ This API will return the specified auto-follow pattern collection. 
This example retrieves information about an auto-follow pattern collection named `my_auto_follow_pattern`: -[source,js] +[source,console] -------------------------------------------------- GET /_ccr/auto_follow/my_auto_follow_pattern -------------------------------------------------- -// CONSOLE // TEST[setup:remote_cluster] The API returns the following result: diff --git a/docs/reference/ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc b/docs/reference/ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc index 01039080684..04fa137cad7 100644 --- a/docs/reference/ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc +++ b/docs/reference/ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc @@ -11,7 +11,7 @@ Creates an auto-follow pattern. [[ccr-put-auto-follow-pattern-request]] ==== {api-request-title} -[source,js] +[source,console] -------------------------------------------------- PUT /_ccr/auto_follow/ { @@ -23,7 +23,6 @@ PUT /_ccr/auto_follow/ "follow_index_pattern" : "" } -------------------------------------------------- -// CONSOLE // TEST[setup:remote_cluster] // TEST[s//auto_follow_pattern_name/] // TEST[s//remote_cluster/] @@ -32,11 +31,10 @@ PUT /_ccr/auto_follow/ ////////////////////////// -[source,js] +[source,console] -------------------------------------------------- DELETE /_ccr/auto_follow/auto_follow_pattern_name -------------------------------------------------- -// CONSOLE // TEST[continued] ////////////////////////// @@ -87,7 +85,7 @@ include::../follow-request-body.asciidoc[] This example creates an auto-follow pattern named `my_auto_follow_pattern`: -[source,js] +[source,console] -------------------------------------------------- PUT /_ccr/auto_follow/my_auto_follow_pattern { @@ -109,7 +107,6 @@ PUT /_ccr/auto_follow/my_auto_follow_pattern "read_poll_timeout" : "30s" } -------------------------------------------------- -// CONSOLE // TEST[setup:remote_cluster] The API returns the following result: @@ -123,11 +120,10 @@ The API returns the following result: ////////////////////////// -[source,js] +[source,console] -------------------------------------------------- DELETE /_ccr/auto_follow/my_auto_follow_pattern -------------------------------------------------- -// CONSOLE // TEST[continued] ////////////////////////// diff --git a/docs/reference/ccr/apis/follow-request-body.asciidoc b/docs/reference/ccr/apis/follow-request-body.asciidoc index 1cae2908fb9..2a707c56b2b 100644 --- a/docs/reference/ccr/apis/follow-request-body.asciidoc +++ b/docs/reference/ccr/apis/follow-request-body.asciidoc @@ -49,7 +49,7 @@ ////////////////////////// -[source,js] +[source,console] -------------------------------------------------- PUT /follower_index/_ccr/follow?wait_for_active_shards=1 { @@ -57,22 +57,19 @@ PUT /follower_index/_ccr/follow?wait_for_active_shards=1 "leader_index" : "leader_index" } -------------------------------------------------- -// CONSOLE // TESTSETUP // TEST[setup:remote_cluster_and_leader_index] -[source,js] +[source,console] -------------------------------------------------- POST /follower_index/_ccr/pause_follow -------------------------------------------------- -// CONSOLE // TEARDOWN -[source,js] +[source,console] -------------------------------------------------- GET /follower_index/_ccr/info?filter_path=follower_indices.parameters -------------------------------------------------- -// CONSOLE ////////////////////////// diff --git a/docs/reference/ccr/apis/follow/get-follow-info.asciidoc b/docs/reference/ccr/apis/follow/get-follow-info.asciidoc index 
3f702f6dcac..5df2efa863b 100644 --- a/docs/reference/ccr/apis/follow/get-follow-info.asciidoc +++ b/docs/reference/ccr/apis/follow/get-follow-info.asciidoc @@ -13,7 +13,7 @@ Retrieves information about all follower indices. ////////////////////////// -[source,js] +[source,console] -------------------------------------------------- PUT /follower_index/_ccr/follow?wait_for_active_shards=1 { @@ -21,24 +21,21 @@ PUT /follower_index/_ccr/follow?wait_for_active_shards=1 "leader_index" : "leader_index" } -------------------------------------------------- -// CONSOLE // TESTSETUP // TEST[setup:remote_cluster_and_leader_index] -[source,js] +[source,console] -------------------------------------------------- POST /follower_index/_ccr/pause_follow -------------------------------------------------- -// CONSOLE // TEARDOWN ////////////////////////// -[source,js] +[source,console] -------------------------------------------------- GET //_ccr/info -------------------------------------------------- -// CONSOLE // TEST[s//follower_index/] [[ccr-get-follow-info-prereqs]] @@ -138,11 +135,10 @@ The `parameters` contains the following fields: This example retrieves follower info: -[source,js] +[source,console] -------------------------------------------------- GET /follower_index/_ccr/info -------------------------------------------------- -// CONSOLE The API returns the following results: diff --git a/docs/reference/ccr/apis/follow/get-follow-stats.asciidoc b/docs/reference/ccr/apis/follow/get-follow-stats.asciidoc index f9683e615a6..a83019d3153 100644 --- a/docs/reference/ccr/apis/follow/get-follow-stats.asciidoc +++ b/docs/reference/ccr/apis/follow/get-follow-stats.asciidoc @@ -13,7 +13,7 @@ Get follower stats. ////////////////////////// -[source,js] +[source,console] -------------------------------------------------- PUT /follower_index/_ccr/follow?wait_for_active_shards=1 { @@ -21,24 +21,21 @@ PUT /follower_index/_ccr/follow?wait_for_active_shards=1 "leader_index" : "leader_index" } -------------------------------------------------- -// CONSOLE // TESTSETUP // TEST[setup:remote_cluster_and_leader_index] -[source,js] +[source,console] -------------------------------------------------- POST /follower_index/_ccr/pause_follow -------------------------------------------------- -// CONSOLE // TEARDOWN ////////////////////////// -[source,js] +[source,console] -------------------------------------------------- GET //_ccr/stats -------------------------------------------------- -// CONSOLE // TEST[s//follower_index/] [[ccr-get-follow-stats-prereqs]] @@ -205,11 +202,10 @@ NOTE: When the follower is caught up to the leader, this number will This example retrieves follower stats: -[source,js] +[source,console] -------------------------------------------------- GET /follower_index/_ccr/stats -------------------------------------------------- -// CONSOLE The API returns the following results: diff --git a/docs/reference/ccr/apis/follow/post-forget-follower.asciidoc b/docs/reference/ccr/apis/follow/post-forget-follower.asciidoc index 287c9045eb1..adf4508c5cd 100644 --- a/docs/reference/ccr/apis/follow/post-forget-follower.asciidoc +++ b/docs/reference/ccr/apis/follow/post-forget-follower.asciidoc @@ -13,7 +13,7 @@ Removes the follower retention leases from the leader. 
////////////////////////// -[source,js] +[source,console] -------------------------------------------------- PUT /follower_index/_ccr/follow?wait_for_active_shards=1 { @@ -21,20 +21,18 @@ PUT /follower_index/_ccr/follow?wait_for_active_shards=1 "leader_index" : "leader_index" } -------------------------------------------------- -// CONSOLE // TESTSETUP // TEST[setup:remote_cluster_and_leader_index] -[source,js] +[source,console] -------------------------------------------------- POST /follower_index/_ccr/pause_follow -------------------------------------------------- -// CONSOLE // TEARDOWN ////////////////////////// -[source,js] +[source,console] -------------------------------------------------- POST //_ccr/forget_follower { @@ -44,7 +42,6 @@ POST //_ccr/forget_follower "leader_remote_cluster" : "" } -------------------------------------------------- -// CONSOLE // TEST[s//leader_index/] // TEST[s//follower_cluster/] // TEST[s//follower_index/] @@ -127,7 +124,7 @@ the <> is invoked. This example removes the follower retention leases for `follower_index` from `leader_index`. -[source,js] +[source,console] -------------------------------------------------- POST /leader_index/_ccr/forget_follower { @@ -137,7 +134,6 @@ POST /leader_index/_ccr/forget_follower "leader_remote_cluster" : "leader_cluster" } -------------------------------------------------- -// CONSOLE // TEST[skip_shard_failures] The API returns the following result: diff --git a/docs/reference/ccr/apis/follow/post-pause-follow.asciidoc b/docs/reference/ccr/apis/follow/post-pause-follow.asciidoc index 856b5c1b8d0..e5881fe5a6c 100644 --- a/docs/reference/ccr/apis/follow/post-pause-follow.asciidoc +++ b/docs/reference/ccr/apis/follow/post-pause-follow.asciidoc @@ -13,7 +13,7 @@ Pauses a follower index. ////////////////////////// -[source,js] +[source,console] -------------------------------------------------- PUT /follower_index/_ccr/follow?wait_for_active_shards=1 { @@ -21,17 +21,15 @@ PUT /follower_index/_ccr/follow?wait_for_active_shards=1 "leader_index" : "leader_index" } -------------------------------------------------- -// CONSOLE // TESTSETUP // TEST[setup:remote_cluster_and_leader_index] ////////////////////////// -[source,js] +[source,console] -------------------------------------------------- POST //_ccr/pause_follow -------------------------------------------------- -// CONSOLE // TEST[s//follower_index/] [[ccr-post-pause-follow-prereqs]] @@ -61,11 +59,10 @@ following task. This example pauses a follower index named `follower_index`: -[source,js] +[source,console] -------------------------------------------------- POST /follower_index/_ccr/pause_follow -------------------------------------------------- -// CONSOLE // TEST The API returns the following result: diff --git a/docs/reference/ccr/apis/follow/post-resume-follow.asciidoc b/docs/reference/ccr/apis/follow/post-resume-follow.asciidoc index b6aa6b5d938..b6bd63f0192 100644 --- a/docs/reference/ccr/apis/follow/post-resume-follow.asciidoc +++ b/docs/reference/ccr/apis/follow/post-resume-follow.asciidoc @@ -13,7 +13,7 @@ Resumes a follower index. 
////////////////////////// -[source,js] +[source,console] -------------------------------------------------- PUT /follower_index/_ccr/follow?wait_for_active_shards=1 { @@ -23,26 +23,23 @@ PUT /follower_index/_ccr/follow?wait_for_active_shards=1 POST /follower_index/_ccr/pause_follow -------------------------------------------------- -// CONSOLE // TESTSETUP // TEST[setup:remote_cluster_and_leader_index] -[source,js] +[source,console] -------------------------------------------------- POST /follower_index/_ccr/pause_follow -------------------------------------------------- -// CONSOLE // TEARDOWN ////////////////////////// -[source,js] +[source,console] -------------------------------------------------- POST //_ccr/resume_follow { } -------------------------------------------------- -// CONSOLE // TEST[s//follower_index/] // TEST[s//remote_cluster/] // TEST[s//leader_index/] @@ -79,7 +76,7 @@ include::../follow-request-body.asciidoc[] This example resumes a follower index named `follower_index`: -[source,js] +[source,console] -------------------------------------------------- POST /follower_index/_ccr/resume_follow { @@ -95,7 +92,6 @@ POST /follower_index/_ccr/resume_follow "read_poll_timeout" : "30s" } -------------------------------------------------- -// CONSOLE The API returns the following result: diff --git a/docs/reference/ccr/apis/follow/post-unfollow.asciidoc b/docs/reference/ccr/apis/follow/post-unfollow.asciidoc index cc7023babbb..703f926cf9e 100644 --- a/docs/reference/ccr/apis/follow/post-unfollow.asciidoc +++ b/docs/reference/ccr/apis/follow/post-unfollow.asciidoc @@ -13,7 +13,7 @@ Converts a follower index to a regular index. ////////////////////////// -[source,js] +[source,console] -------------------------------------------------- PUT /follower_index/_ccr/follow?wait_for_active_shards=1 { @@ -25,17 +25,15 @@ POST /follower_index/_ccr/pause_follow POST /follower_index/_close -------------------------------------------------- -// CONSOLE // TESTSETUP // TEST[setup:remote_cluster_and_leader_index] ////////////////////////// -[source,js] +[source,console] -------------------------------------------------- POST //_ccr/unfollow -------------------------------------------------- -// CONSOLE // TEST[s//follower_index/] [[ccr-post-unfollow-prereqs]] @@ -68,11 +66,10 @@ irreversible operation. This example converts `follower_index` from a follower index to a regular index: -[source,js] +[source,console] -------------------------------------------------- POST /follower_index/_ccr/unfollow -------------------------------------------------- -// CONSOLE // TEST The API returns the following result: diff --git a/docs/reference/ccr/apis/follow/put-follow.asciidoc b/docs/reference/ccr/apis/follow/put-follow.asciidoc index 6bb038b8326..9f31f47a665 100644 --- a/docs/reference/ccr/apis/follow/put-follow.asciidoc +++ b/docs/reference/ccr/apis/follow/put-follow.asciidoc @@ -13,16 +13,15 @@ Creates a follower index. 
////////////////////////// -[source,js] +[source,console] -------------------------------------------------- POST /follower_index/_ccr/pause_follow -------------------------------------------------- -// CONSOLE // TEARDOWN ////////////////////////// -[source,js] +[source,console] -------------------------------------------------- PUT //_ccr/follow?wait_for_active_shards=1 { @@ -30,7 +29,6 @@ PUT //_ccr/follow?wait_for_active_shards=1 "leader_index" : "" } -------------------------------------------------- -// CONSOLE // TEST[setup:remote_cluster_and_leader_index] // TEST[s//follower_index/] // TEST[s//remote_cluster/] @@ -87,7 +85,7 @@ include::../follow-request-body.asciidoc[] This example creates a follower index named `follower_index`: -[source,js] +[source,console] -------------------------------------------------- PUT /follower_index/_ccr/follow?wait_for_active_shards=1 { @@ -105,7 +103,6 @@ PUT /follower_index/_ccr/follow?wait_for_active_shards=1 "read_poll_timeout" : "30s" } -------------------------------------------------- -// CONSOLE // TEST[setup:remote_cluster_and_leader_index] The API returns the following result: diff --git a/docs/reference/ccr/apis/get-ccr-stats.asciidoc b/docs/reference/ccr/apis/get-ccr-stats.asciidoc index b4cf77c00f6..c09e7c5426a 100644 --- a/docs/reference/ccr/apis/get-ccr-stats.asciidoc +++ b/docs/reference/ccr/apis/get-ccr-stats.asciidoc @@ -14,7 +14,7 @@ Get {ccr} stats. ////////////////////////// -[source,js] +[source,console] -------------------------------------------------- PUT /follower_index/_ccr/follow?wait_for_active_shards=1 { @@ -22,24 +22,21 @@ PUT /follower_index/_ccr/follow?wait_for_active_shards=1 "leader_index" : "leader_index" } -------------------------------------------------- -// CONSOLE // TESTSETUP // TEST[setup:remote_cluster_and_leader_index] -[source,js] +[source,console] -------------------------------------------------- POST /follower_index/_ccr/pause_follow -------------------------------------------------- -// CONSOLE // TEARDOWN ////////////////////////// -[source,js] +[source,console] -------------------------------------------------- GET /_ccr/stats -------------------------------------------------- -// CONSOLE ==== {api-prereq-title} @@ -93,11 +90,10 @@ This object consists of the following fields: This example retrieves {ccr} stats: -[source,js] +[source,console] -------------------------------------------------- GET /_ccr/stats -------------------------------------------------- -// CONSOLE The API returns the following results: diff --git a/docs/reference/ccr/getting-started.asciidoc b/docs/reference/ccr/getting-started.asciidoc index 5d726082205..aa096047bb9 100644 --- a/docs/reference/ccr/getting-started.asciidoc +++ b/docs/reference/ccr/getting-started.asciidoc @@ -1,7 +1,7 @@ [role="xpack"] [testenv="platinum"] [[ccr-getting-started]] -== Getting started with {ccr} +=== Getting started with {ccr} This getting-started guide for {ccr} shows you how to: @@ -12,15 +12,15 @@ This getting-started guide for {ccr} shows you how to: a leader index * <> -[float] + [[ccr-getting-started-before-you-begin]] -=== Before you begin +==== Before you begin . {stack-gs}/get-started-elastic-stack.html#install-elasticsearch[Install {es}] on your local and remote clusters. . Obtain a license that includes the {ccr} features. See https://www.elastic.co/subscriptions[subscriptions] and - <>. + {stack-ov}/license-management.html[License-management]. . 
If the Elastic {security-features} are enabled in your local and remote clusters, you need a user that has appropriate authority to perform the steps @@ -34,7 +34,7 @@ to control which users have authority to manage {ccr}. By default, you can perform all of the steps in this tutorial by using the built-in `elastic` user. However, a password must be set for this user before the user can do anything. For information about how to set that password, -see <>. +see {stack-ov}/security-getting-started.html[Tutorial: Getting started with security]. If you are performing these steps in a production environment, take extra care because the `elastic` user has the `superuser` role and you could inadvertently @@ -81,16 +81,16 @@ cluster update settings API, you will also need a user with the `all` cluster privilege. -- -[float] + [[ccr-getting-started-remote-cluster]] -=== Connecting to a remote cluster +==== Connecting to a remote cluster The {ccr} features require that you {ref}/modules-remote-clusters.html[connect your local cluster to a remote cluster]. In this tutorial, we will connect our local cluster to a remote cluster with the cluster alias `leader`. -[source,js] +[source,console] -------------------------------------------------- PUT /_cluster/settings { @@ -107,7 +107,6 @@ PUT /_cluster/settings } } -------------------------------------------------- -// CONSOLE // TEST[setup:host] // TEST[s/127.0.0.1:9300/\${transport_host}/] <1> Specifies the hostname and transport port of a seed node in the remote @@ -116,11 +115,10 @@ PUT /_cluster/settings You can verify that the local cluster is successfully connected to the remote cluster. -[source,js] +[source,console] -------------------------------------------------- GET /_remote/info -------------------------------------------------- -// CONSOLE // TEST[continued] The API will respond by showing that the local cluster is connected to the @@ -156,13 +154,13 @@ Alternatively, you can manage remote clusters on the image::images/remote-clusters.jpg["The Remote Clusters page in {kib}"] -[float] + [[ccr-getting-started-leader-index]] -=== Creating a leader index +==== Creating a leader index In the following example, we will create a leader index in the remote cluster: -[source,js] +[source,console] -------------------------------------------------- PUT /server-metrics { @@ -199,12 +197,11 @@ PUT /server-metrics } } -------------------------------------------------- -// CONSOLE // TEST[continued] -[float] + [[ccr-getting-started-follower-index]] -=== Creating a follower index +==== Creating a follower index Follower indices are created with the {ref}/ccr-put-follow.html[create follower API]. When you create a follower index, you must reference the @@ -212,7 +209,7 @@ API]. When you create a follower index, you must reference the <> that you created in the remote cluster. 
-[source,js] +[source,console] -------------------------------------------------- PUT /server-metrics-copy/_ccr/follow?wait_for_active_shards=1 { @@ -220,7 +217,6 @@ PUT /server-metrics-copy/_ccr/follow?wait_for_active_shards=1 "leader_index" : "server-metrics" } -------------------------------------------------- -// CONSOLE // TEST[continued] ////////////////////////// @@ -248,7 +244,7 @@ inspect the status of replication using the ////////////////////////// -[source,js] +[source,console] -------------------------------------------------- POST /server-metrics-copy/_ccr/pause_follow @@ -256,14 +252,13 @@ POST /server-metrics-copy/_close POST /server-metrics-copy/_ccr/unfollow -------------------------------------------------- -// CONSOLE // TEST[continued] ////////////////////////// -[float] + [[ccr-getting-started-auto-follow]] -=== Automatically create follower indices +==== Automatically create follower indices The <> feature in {ccr} helps for time series use cases where you want to follow new indices that are periodically created in the @@ -276,7 +271,7 @@ indices you want to automatically follow. For example: -[source,js] +[source,console] -------------------------------------------------- PUT /_ccr/auto_follow/beats { @@ -289,7 +284,6 @@ PUT /_ccr/auto_follow/beats "follow_index_pattern" : "{{leader_index}}-copy" <3> } -------------------------------------------------- -// CONSOLE // TEST[continued] <1> Automatically follow new {metricbeat} indices. <2> Automatically follow new {packetbeat} indices. @@ -309,11 +303,10 @@ PUT /_ccr/auto_follow/beats ////////////////////////// -[source,js] +[source,console] -------------------------------------------------- DELETE /_ccr/auto_follow/beats -------------------------------------------------- -// CONSOLE // TEST[continued] ////////////////////////// diff --git a/docs/reference/ccr/index.asciidoc b/docs/reference/ccr/index.asciidoc index ba2fa0d1e78..cb3d8d4cc1f 100644 --- a/docs/reference/ccr/index.asciidoc +++ b/docs/reference/ccr/index.asciidoc @@ -1,10 +1,7 @@ [role="xpack"] [testenv="platinum"] [[xpack-ccr]] -= {ccr-cap} - -[partintro] --- +== {ccr-cap} The {ccr} (CCR) feature enables replication of indices in remote clusters to a local cluster. This functionality can be used in some common production use @@ -22,7 +19,6 @@ This guide provides an overview of {ccr}: * <> * <> --- include::overview.asciidoc[] include::requirements.asciidoc[] diff --git a/docs/reference/ccr/overview.asciidoc b/docs/reference/ccr/overview.asciidoc index be009b4aefa..8f25cb1da47 100644 --- a/docs/reference/ccr/overview.asciidoc +++ b/docs/reference/ccr/overview.asciidoc @@ -1,7 +1,7 @@ [role="xpack"] [testenv="platinum"] [[ccr-overview]] -== Overview +=== Overview {ccr-cap} is done on an index-by-index basis. Replication is @@ -17,8 +17,8 @@ Replication is pull-based. This means that replication is driven by the follower index. This simplifies state management on the leader index and means that {ccr} does not interfere with indexing on the leader index. -[float] -=== Configuring replication + +==== Configuring replication Replication can be configured in two ways: @@ -46,7 +46,7 @@ the process to complete, you can use the `wait_for_active_shards` parameter. 
////////////////////////// -[source,js] +[source,console] -------------------------------------------------- PUT /follower_index/_ccr/follow?wait_for_active_shards=1 { @@ -54,21 +54,19 @@ PUT /follower_index/_ccr/follow?wait_for_active_shards=1 "remote_cluster" : "remote_cluster", "leader_index" : "leader_index" } -------------------------------------------------- -// CONSOLE // TESTSETUP // TEST[setup:remote_cluster_and_leader_index] -[source,js] +[source,console] -------------------------------------------------- POST /follower_index/_ccr/pause_follow -------------------------------------------------- -// CONSOLE // TEARDOWN ////////////////////////// -[float] -=== The mechanics of replication + +==== The mechanics of replication While replication is managed at the index level, replication is performed at the shard level. When a follower index is created, it is automatically @@ -130,8 +128,8 @@ closing itself, applying the settings update, and then re-opening itself. The follower index will be unavailable for reads and not replicating writes during this cycle. -[float] -=== Inspecting the progress of replication + +==== Inspecting the progress of replication You can inspect the progress of replication at the shard level with the {ref}/ccr-get-follow-stats.html[get follower stats API]. This API gives you @@ -139,8 +137,8 @@ insight into the read and writes managed by the follower shard task. It also reports read exceptions that can be retried and fatal exceptions that require user intervention. -[float] -=== Pausing and resuming replication + +==== Pausing and resuming replication You can pause replication with the {ref}/ccr-post-pause-follow.html[pause follower API] and then later resume @@ -149,8 +147,8 @@ Using these APIs in tandem enables you to adjust the read and write parameters on the follower shard task if your initial configuration is not suitable for your use case. -[float] -=== Leader index retaining operations for replication + +==== Leader index retaining operations for replication If the follower is unable to replicate operations from a leader for a period of time, the following process can fail due to the leader lacking a complete history @@ -180,8 +178,8 @@ the lease expires. It is valuable to have monitoring in place to detect a follow replication issue prior to the lease expiring so that the problem can be remedied before the follower falls fatally behind. -[float] -=== Remedying a follower that has fallen behind + +==== Remedying a follower that has fallen behind If a follower falls sufficiently behind a leader that it can no longer replicate operations this can be detected in {kib} or by using the @@ -191,7 +189,7 @@ operations this can be detected in {kib} or by using the In order to restart the follower, you must pause the following process, close the index, and create the follower index again. For example: -["source","js"] +[source,console] ---------------------------------------------------------------------- POST /follower_index/_ccr/pause_follow POST /follower_index/_close PUT /follower_index/_ccr/follow?wait_for_active_shards=1 { @@ -203,7 +201,6 @@ PUT /follower_index/_ccr/follow?wait_for_active_shards=1 "remote_cluster" : "remote_cluster", "leader_index" : "leader_index" } ---------------------------------------------------------------------- -// CONSOLE Re-creating the follower index is a destructive action. All of the existing Lucene segment files are deleted on the follower cluster. The @@ -211,8 +208,8 @@ segment files are deleted on the follower cluster. The files from the leader again. After the follower index initializes, the following process starts again.
-[float] -=== Terminating replication + +==== Terminating replication You can terminate replication with the {ref}/ccr-post-unfollow.html[unfollow API]. This API converts a follower index diff --git a/docs/reference/ccr/remote-recovery.asciidoc b/docs/reference/ccr/remote-recovery.asciidoc index fcf03cfc728..f96aaad3d09 100644 --- a/docs/reference/ccr/remote-recovery.asciidoc +++ b/docs/reference/ccr/remote-recovery.asciidoc @@ -1,7 +1,7 @@ [role="xpack"] [testenv="platinum"] [[remote-recovery]] -== Remote recovery +=== Remote recovery When you create a follower index, you cannot use it until it is fully initialized. The _remote recovery_ process builds a new copy of a shard on a follower node by diff --git a/docs/reference/ccr/requirements.asciidoc b/docs/reference/ccr/requirements.asciidoc index 91fbd2c7553..2e92d75707b 100644 --- a/docs/reference/ccr/requirements.asciidoc +++ b/docs/reference/ccr/requirements.asciidoc @@ -22,7 +22,7 @@ existing data that you want to replicate from another cluster, you must {ref}/docs-reindex.html[reindex] your data into a new index with soft deletes enabled. -[float] + [[ccr-overview-soft-deletes]] ==== Soft delete settings @@ -42,7 +42,7 @@ The default value is `12h`. For more information about index settings, see {ref}/index-modules.html[Index modules]. -[float] + [[ccr-overview-beats]] ==== Setting soft deletes on indices created by APM Server or Beats @@ -63,7 +63,7 @@ For additional information on controlling the index templates managed by APM Server or Beats, see the relevant documentation on loading the Elasticsearch index template. -[float] + [[ccr-overview-logstash]] ==== Setting soft deletes on indices created by Logstash diff --git a/docs/reference/ccr/upgrading.asciidoc b/docs/reference/ccr/upgrading.asciidoc index 4f8d8409b5b..8f4eb829d46 100644 --- a/docs/reference/ccr/upgrading.asciidoc +++ b/docs/reference/ccr/upgrading.asciidoc @@ -1,7 +1,7 @@ [role="xpack"] [testenv="platinum"] [[ccr-upgrading]] -== Upgrading clusters +=== Upgrading clusters Clusters that are actively using {ccr} require a careful approach to upgrades. Otherwise index following may fail during a rolling upgrade, because of the @@ -17,8 +17,8 @@ following reasons: Rolling upgrading clusters with {ccr} is different in case of uni-directional index following and bi-directional index following. -[float] -=== Uni-directional index following + +==== Uni-directional index following In a uni-directional setup between two clusters, one cluster contains only leader indices, and the other cluster contains only follower indices following @@ -35,8 +35,8 @@ cluster B that follows indices in cluster A and cluster C that follows indices in cluster B. In this case the cluster C should be upgraded first, then cluster B and finally cluster A. -[float] -=== Bi-directional index following + +==== Bi-directional index following In a bi-directional setup between two clusters, each cluster contains both leader and follower indices. diff --git a/docs/reference/cluster.asciidoc b/docs/reference/cluster.asciidoc index 81b0b2ae3d8..57f62a4faac 100644 --- a/docs/reference/cluster.asciidoc +++ b/docs/reference/cluster.asciidoc @@ -52,7 +52,7 @@ is not supported in the {oss-dist}. Here are some examples of the use of node filters with the <> APIs. 
-[source,js] +[source,console] -------------------------------------------------- # If no filters are given, the default is to select all nodes GET /_nodes @@ -78,7 +78,6 @@ GET /_nodes/rack:2 GET /_nodes/ra*:2 GET /_nodes/ra*:2* -------------------------------------------------- -// CONSOLE include::cluster/health.asciidoc[] diff --git a/docs/reference/cluster/allocation-explain.asciidoc b/docs/reference/cluster/allocation-explain.asciidoc index 4101ea96af8..851f0a6c076 100644 --- a/docs/reference/cluster/allocation-explain.asciidoc +++ b/docs/reference/cluster/allocation-explain.asciidoc @@ -63,15 +63,14 @@ it finds by sending an empty body for the request. ////// -[source,js] +[source,console] -------------------------------------------------- PUT /myindex -------------------------------------------------- -// CONSOLE // TESTSETUP ////// -[source,js] +[source,console] -------------------------------------------------- GET /_cluster/allocation/explain { @@ -80,12 +79,11 @@ GET /_cluster/allocation/explain "primary": true } -------------------------------------------------- -// CONSOLE ===== Example of the current_node parameter -[source,js] +[source,console] -------------------------------------------------- GET /_cluster/allocation/explain { @@ -95,14 +93,14 @@ GET /_cluster/allocation/explain "current_node": "nodeA" <1> } -------------------------------------------------- -// CONSOLE // TEST[skip:no way of knowing the current_node] + <1> The node where shard 0 currently has a replica on ===== Examples of unassigned primary shard explanations -[source,js] +[source,console] -------------------------------------------------- PUT /idx?master_timeout=1s&timeout=1s {"settings": {"index.routing.allocation.include._name": "non_existent_node"} } @@ -114,7 +112,6 @@ GET /_cluster/allocation/explain "primary": true } -------------------------------------------------- -// CONSOLE The API returns the following response for an unassigned primary shard: diff --git a/docs/reference/cluster/health.asciidoc b/docs/reference/cluster/health.asciidoc index 97d946d25ed..553a23fcd8f 100644 --- a/docs/reference/cluster/health.asciidoc +++ b/docs/reference/cluster/health.asciidoc @@ -28,11 +28,10 @@ wait for 50 seconds for the cluster to reach the `yellow` level (if it reaches the `green` or `yellow` status before 50 seconds elapse, it will return at that point): -[source,js] +[source,console] -------------------------------------------------- GET /_cluster/health?wait_for_status=yellow&timeout=50s -------------------------------------------------- -// CONSOLE [[cluster-health-api-path-params]] ==== {api-path-parms-title} @@ -137,11 +136,10 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=timeoutparms] [[cluster-health-api-example]] ==== {api-examples-title} -[source,js] +[source,console] -------------------------------------------------- GET _cluster/health -------------------------------------------------- -// CONSOLE // TEST[s/^/PUT test1\n/] The API returns the following response in case of a quiet single node cluster @@ -174,9 +172,8 @@ with a single index with one shard and one replica: The following is an example of getting the cluster health at the `shards` level: -[source,js] +[source,console] -------------------------------------------------- GET /_cluster/health/twitter?level=shards -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] diff --git a/docs/reference/cluster/nodes-hot-threads.asciidoc b/docs/reference/cluster/nodes-hot-threads.asciidoc index 
4b1d4948df9..ec248f5d253 100644 --- a/docs/reference/cluster/nodes-hot-threads.asciidoc +++ b/docs/reference/cluster/nodes-hot-threads.asciidoc @@ -57,9 +57,8 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=timeoutparms] [[cluster-nodes-hot-threads-api-example]] ==== {api-examples-title} -[source,js] +[source,console] -------------------------------------------------- GET /_nodes/hot_threads GET /_nodes/nodeId1,nodeId2/hot_threads -------------------------------------------------- -// CONSOLE \ No newline at end of file diff --git a/docs/reference/cluster/nodes-info.asciidoc b/docs/reference/cluster/nodes-info.asciidoc index a5a7654930d..d5408ae7cba 100644 --- a/docs/reference/cluster/nodes-info.asciidoc +++ b/docs/reference/cluster/nodes-info.asciidoc @@ -158,7 +158,7 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=timeoutparms] [[cluster-nodes-info-api-example]] ==== {api-examples-title} -[source,js] +[source,console] -------------------------------------------------- # return just process GET /_nodes/process @@ -175,7 +175,6 @@ GET /_nodes/nodeId1,nodeId2/info/jvm,process # return all the information of only nodeId1 and nodeId2 GET /_nodes/nodeId1,nodeId2/_all -------------------------------------------------- -// CONSOLE The `_all` flag can be set to return all the information - or you can omit it. @@ -186,11 +185,10 @@ The `_all` flag can be set to return all the information - or you can omit it. If `plugins` is specified, the result will contain details about the installed plugins and modules: -[source,js] +[source,console] -------------------------------------------------- GET /_nodes/plugins -------------------------------------------------- -// CONSOLE // TEST[setup:node] The API returns the following response: @@ -258,11 +256,10 @@ The API returns the following response: If `ingest` is specified, the response contains details about the available processors per node: -[source,js] +[source,console] -------------------------------------------------- GET /_nodes/ingest -------------------------------------------------- -// CONSOLE // TEST[setup:node] The API returns the following response: diff --git a/docs/reference/cluster/nodes-reload-secure-settings.asciidoc b/docs/reference/cluster/nodes-reload-secure-settings.asciidoc index b99f93dfc53..1ef75d07e22 100644 --- a/docs/reference/cluster/nodes-reload-secure-settings.asciidoc +++ b/docs/reference/cluster/nodes-reload-secure-settings.asciidoc @@ -9,12 +9,11 @@ reinitialized without restarting the node. The operation is complete when all compatible plugins have finished reinitializing. Subsequently, the keystore is closed and any changes to it will not be reflected on the node. -[source,js] +[source,console] -------------------------------------------------- POST _nodes/reload_secure_settings POST _nodes/nodeId1,nodeId2/reload_secure_settings -------------------------------------------------- -// CONSOLE // TEST[setup:node] // TEST[s/nodeId1,nodeId2/*/] diff --git a/docs/reference/cluster/nodes-stats.asciidoc b/docs/reference/cluster/nodes-stats.asciidoc index 521b3f18329..1d8d92bec41 100644 --- a/docs/reference/cluster/nodes-stats.asciidoc +++ b/docs/reference/cluster/nodes-stats.asciidoc @@ -389,7 +389,7 @@ keyed by node. 
For each node: [[cluster-nodes-stats-api-example]] ==== {api-examples-title} -[source,js] +[source,console] -------------------------------------------------- # return just indices GET /_nodes/stats/indices @@ -400,7 +400,6 @@ GET /_nodes/stats/os,process # return just process for node with IP address 10.0.0.1 GET /_nodes/10.0.0.1/stats/process -------------------------------------------------- -// CONSOLE All stats can be explicitly requested via `/_nodes/stats/_all` or `/_nodes/stats?metric=_all`. @@ -408,7 +407,7 @@ All stats can be explicitly requested via `/_nodes/stats/_all` or You can get information about indices stats on `node`, `indices`, or `shards` level. -[source,js] +[source,console] -------------------------------------------------- # Fielddata summarized by node GET /_nodes/stats/indices/fielddata?fields=field1,field2 @@ -422,12 +421,11 @@ GET /_nodes/stats/indices/fielddata?level=shards&fields=field1,field2 # You can use wildcards for field names GET /_nodes/stats/indices/fielddata?fields=field* -------------------------------------------------- -// CONSOLE You can get statistics about search groups for searches executed on this node. -[source,js] +[source,console] -------------------------------------------------- # All groups with all stats GET /_nodes/stats?groups=_all @@ -435,4 +433,3 @@ GET /_nodes/stats?groups=_all # Some groups from just the indices stats GET /_nodes/stats/indices?groups=foo,bar -------------------------------------------------- -// CONSOLE diff --git a/docs/reference/cluster/nodes-usage.asciidoc b/docs/reference/cluster/nodes-usage.asciidoc index 5b1559790c3..24032d914e8 100644 --- a/docs/reference/cluster/nodes-usage.asciidoc +++ b/docs/reference/cluster/nodes-usage.asciidoc @@ -54,11 +54,10 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=timeoutparms] Rest action example: -[source,js] +[source,console] -------------------------------------------------- GET _nodes/usage -------------------------------------------------- -// CONSOLE // TEST[setup:node] The API returns the following response: diff --git a/docs/reference/cluster/reroute.asciidoc b/docs/reference/cluster/reroute.asciidoc index 62a2afbe942..9d7b3ae6219 100644 --- a/docs/reference/cluster/reroute.asciidoc +++ b/docs/reference/cluster/reroute.asciidoc @@ -173,7 +173,7 @@ will be deleted or overwritten. 
This is a short example of a simple reroute API call: -[source,js] +[source,console] -------------------------------------------------- POST /_cluster/reroute { @@ -193,5 +193,4 @@ POST /_cluster/reroute ] } -------------------------------------------------- -// CONSOLE // TEST[skip:doc tests run with only a single node] diff --git a/docs/reference/cluster/state.asciidoc b/docs/reference/cluster/state.asciidoc index 8c09d31de2c..554925a3b17 100644 --- a/docs/reference/cluster/state.asciidoc +++ b/docs/reference/cluster/state.asciidoc @@ -124,26 +124,21 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=master-timeout] The following example returns only `metadata` and `routing_table` data for the `foo` and `bar` indices: -[source,js] +[source,console] -------------------------------------------------- GET /_cluster/state/metadata,routing_table/foo,bar -------------------------------------------------- -// CONSOLE The next example returns everything for the `foo` and `bar` indices: -[source,js] +[source,console] -------------------------------------------------- GET /_cluster/state/_all/foo,bar -------------------------------------------------- -// CONSOLE This example returns only the `blocks` metadata: -[source,js] +[source,console] -------------------------------------------------- GET /_cluster/state/blocks -------------------------------------------------- -// CONSOLE - - diff --git a/docs/reference/cluster/stats.asciidoc b/docs/reference/cluster/stats.asciidoc index d35d832eaf0..aafd6fd4b99 100644 --- a/docs/reference/cluster/stats.asciidoc +++ b/docs/reference/cluster/stats.asciidoc @@ -39,11 +39,10 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=timeoutparms] [[cluster-stats-api-example]] ==== {api-examples-title} -[source,js] +[source,console] -------------------------------------------------- GET /_cluster/stats?human&pretty -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] The API returns the following response: @@ -263,8 +262,7 @@ The API returns the following response: This API can be restricted to a subset of the nodes using <>: -[source,js] +[source,console] -------------------------------------------------- GET /_cluster/stats/nodes/node1,node*,master:false -------------------------------------------------- -// CONSOLE diff --git a/docs/reference/cluster/tasks.asciidoc b/docs/reference/cluster/tasks.asciidoc index 02586cb9677..3c96639b385 100644 --- a/docs/reference/cluster/tasks.asciidoc +++ b/docs/reference/cluster/tasks.asciidoc @@ -49,13 +49,12 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=timeoutparms] [[tasks-api-examples]] ==== {api-examples-title} -[source,js] +[source,console] -------------------------------------------------- GET _tasks <1> GET _tasks?nodes=nodeId1,nodeId2 <2> GET _tasks?nodes=nodeId1,nodeId2&actions=cluster:* <3> -------------------------------------------------- -// CONSOLE // TEST[skip:No tasks to retrieve] <1> Retrieves all tasks currently running on all nodes in the cluster. @@ -104,22 +103,20 @@ The API returns the following result: It is also possible to retrieve information for a particular task. The following example retrieves information about task `oTUltX4IQMOUUVeiohTt8A:124`: -[source,js] +[source,console] -------------------------------------------------- GET _tasks/oTUltX4IQMOUUVeiohTt8A:124 -------------------------------------------------- -// CONSOLE // TEST[catch:missing] If the task isn't found, the API returns a 404. 
To retrieve all children of a particular task: -[source,js] +[source,console] -------------------------------------------------- GET _tasks?parent_task_id=oTUltX4IQMOUUVeiohTt8A:123 -------------------------------------------------- -// CONSOLE If the parent isn't found, the API does not return a 404. @@ -131,11 +128,10 @@ the running tasks. This is useful for telling one task from another but is more costly to execute. For example, fetching all searches using the `detailed` request parameter: -[source,js] +[source,console] -------------------------------------------------- GET _tasks?actions=*search&detailed -------------------------------------------------- -// CONSOLE // TEST[skip:No tasks to retrieve] The API returns the following result: @@ -195,21 +191,19 @@ The task API can also be used to wait for completion of a particular task. The following call will block for 10 seconds or until the task with id `oTUltX4IQMOUUVeiohTt8A:12345` is completed. -[source,js] +[source,console] -------------------------------------------------- GET _tasks/oTUltX4IQMOUUVeiohTt8A:12345?wait_for_completion=true&timeout=10s -------------------------------------------------- -// CONSOLE // TEST[catch:missing] You can also wait for all tasks for certain action types to finish. This command will wait for all `reindex` tasks to finish: -[source,js] +[source,console] -------------------------------------------------- GET _tasks?actions=*reindex&wait_for_completion=true&timeout=10s -------------------------------------------------- -// CONSOLE ===== Listing tasks by using _cat @@ -217,12 +211,11 @@ GET _tasks?actions=*reindex&wait_for_completion=true&timeout=10s Tasks can be also listed using _cat version of the list tasks command, which accepts the same arguments as the standard list tasks command. -[source,js] +[source,console] -------------------------------------------------- GET _cat/tasks GET _cat/tasks?detailed -------------------------------------------------- -// CONSOLE [[task-cancellation]] ===== Task Cancellation @@ -230,22 +223,20 @@ GET _cat/tasks?detailed If a long-running task supports cancellation, it can be cancelled with the cancel tasks API. The following example cancels task `oTUltX4IQMOUUVeiohTt8A:12345`: -[source,js] +[source,console] -------------------------------------------------- POST _tasks/oTUltX4IQMOUUVeiohTt8A:12345/_cancel -------------------------------------------------- -// CONSOLE The task cancellation command supports the same task selection parameters as the list tasks command, so multiple tasks can be cancelled at the same time. For example, the following command will cancel all reindex tasks running on the nodes `nodeId1` and `nodeId2`. -[source,js] +[source,console] -------------------------------------------------- POST _tasks/_cancel?nodes=nodeId1,nodeId2&actions=*reindex -------------------------------------------------- -// CONSOLE ===== Task Grouping @@ -253,19 +244,17 @@ The task lists returned by task API commands can be grouped either by nodes (default) or by parent tasks using the `group_by` parameter. 
The following command will change the grouping to parent tasks: -[source,js] +[source,console] -------------------------------------------------- GET _tasks?group_by=parents -------------------------------------------------- -// CONSOLE The grouping can be disabled by specifying `none` as a `group_by` parameter: -[source,js] +[source,console] -------------------------------------------------- GET _tasks?group_by=none -------------------------------------------------- -// CONSOLE ===== Identifying running tasks diff --git a/docs/reference/cluster/update-settings.asciidoc b/docs/reference/cluster/update-settings.asciidoc index 223a9c55444..5a2c96532ba 100644 --- a/docs/reference/cluster/update-settings.asciidoc +++ b/docs/reference/cluster/update-settings.asciidoc @@ -58,7 +58,7 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=timeoutparms] An example of a persistent update: -[source,js] +[source,console] -------------------------------------------------- PUT /_cluster/settings { @@ -67,12 +67,11 @@ PUT /_cluster/settings } } -------------------------------------------------- -// CONSOLE An example of a transient update: -[source,js] +[source,console] -------------------------------------------------- PUT /_cluster/settings?flat_settings=true { @@ -81,7 +80,6 @@ PUT /_cluster/settings?flat_settings=true } } -------------------------------------------------- -// CONSOLE The response to an update returns the changed setting, as in this response to @@ -102,7 +100,7 @@ the transient example: This example resets a setting: -[source,js] +[source,console] -------------------------------------------------- PUT /_cluster/settings { @@ -111,7 +109,6 @@ PUT /_cluster/settings } } -------------------------------------------------- -// CONSOLE The response does not include settings that have been reset: @@ -130,7 +127,7 @@ The response does not include settings that have been reset: You can also reset settings using wildcards. For example, to reset all dynamic `indices.recovery` settings: -[source,js] +[source,console] -------------------------------------------------- PUT /_cluster/settings { @@ -139,5 +136,3 @@ PUT /_cluster/settings } } -------------------------------------------------- -// CONSOLE - diff --git a/docs/reference/cluster/voting-exclusions.asciidoc b/docs/reference/cluster/voting-exclusions.asciidoc index ee4389aec29..28412620d2d 100644 --- a/docs/reference/cluster/voting-exclusions.asciidoc +++ b/docs/reference/cluster/voting-exclusions.asciidoc @@ -69,18 +69,16 @@ For more information, see <>. 
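Before adding or removing exclusions, you may want to check which nodes are currently excluded. One way is to filter the cluster state; treat the exact filter path below as an assumption, since where this data appears in the response can vary between versions:

[source,console]
--------------------------------------------------
GET /_cluster/state/metadata?filter_path=metadata.cluster_coordination.voting_config_exclusions
--------------------------------------------------
// TEST[skip:illustrative only]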
Add `nodeId1` to the voting configuration exclusions list: -[source,js] +[source,console] -------------------------------------------------- POST /_cluster/voting_config_exclusions/nodeId1 -------------------------------------------------- -// CONSOLE // TEST[catch:bad_request] Remove all exclusions from the list: -[source,js] +[source,console] -------------------------------------------------- DELETE /_cluster/voting_config_exclusions -------------------------------------------------- -// CONSOLE \ No newline at end of file diff --git a/docs/reference/data-frames/apis/delete-transform.asciidoc b/docs/reference/data-frames/apis/delete-transform.asciidoc deleted file mode 100644 index c01d18379ce..00000000000 --- a/docs/reference/data-frames/apis/delete-transform.asciidoc +++ /dev/null @@ -1,62 +0,0 @@ -[role="xpack"] -[testenv="basic"] -[[delete-data-frame-transform]] -=== Delete {dataframe-transforms} API - -[subs="attributes"] -++++ -Delete {dataframe-transforms} -++++ - -Deletes an existing {dataframe-transform}. - -beta[] - -[[delete-data-frame-transform-request]] -==== {api-request-title} - -`DELETE _data_frame/transforms/` - -[[delete-data-frame-transform-prereqs]] -==== {api-prereq-title} - -* Before you can delete the {dataframe-transform}, you must stop it. -* If the {es} {security-features} are enabled, you must have -`manage_data_frame_transforms` cluster privileges to use this API. The built-in -`data_frame_transforms_admin` role has these privileges. For more information, -see {stack-ov}/security-privileges.html[Security privileges] and -{stack-ov}/built-in-roles.html[Built-in roles]. - - -[[delete-data-frame-transform-path-parms]] -==== {api-path-parms-title} - -``:: - (Required, string) Identifier for the {dataframe-transform}. - -[[delete-data-frame-transform-query-parms]] -==== {api-query-parms-title} - -`force`:: -(Optional, boolean) When `true`, the {dataframe-transform} is deleted regardless of its -current state. The default value is `false`, meaning that the {dataframe-transform} must be -`stopped` before it can be deleted. - -[[delete-data-frame-transform-examples]] -==== {api-examples-title} - -[source,js] --------------------------------------------------- -DELETE _data_frame/transforms/ecommerce_transform --------------------------------------------------- -// CONSOLE -// TEST[skip:setup kibana sample data] - -When the {dataframe-transform} is deleted, you receive the following results: - -[source,console-result] ----- -{ - "acknowledged" : true -} ----- diff --git a/docs/reference/data-frames/apis/index.asciidoc b/docs/reference/data-frames/apis/index.asciidoc deleted file mode 100644 index 3abc123f354..00000000000 --- a/docs/reference/data-frames/apis/index.asciidoc +++ /dev/null @@ -1,31 +0,0 @@ -[role="xpack"] -[testenv="basic"] -[[data-frame-apis]] -== {dataframe-transform-cap} APIs - -See also {stack-ov}/ml-dataframes.html[{dataframe-transforms-cap}]. 
- -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> - -//CREATE -include::put-transform.asciidoc[] -//UPDATE -include::update-transform.asciidoc[] -//DELETE -include::delete-transform.asciidoc[] -//GET -include::get-transform.asciidoc[] -include::get-transform-stats.asciidoc[] -//PREVIEW -include::preview-transform.asciidoc[] -//START -include::start-transform.asciidoc[] -//STOP -include::stop-transform.asciidoc[] diff --git a/docs/reference/docs/bulk.asciidoc b/docs/reference/docs/bulk.asciidoc index bb83464e55a..2bf023045e3 100644 --- a/docs/reference/docs/bulk.asciidoc +++ b/docs/reference/docs/bulk.asciidoc @@ -69,7 +69,7 @@ Because this format uses literal `\n`'s as delimiters, please be sure that the JSON actions and sources are not pretty printed. Here is an example of a correct sequence of bulk commands: -[source,js] +[source,console] -------------------------------------------------- POST _bulk { "index" : { "_index" : "test", "_id" : "1" } } @@ -80,7 +80,6 @@ POST _bulk { "update" : {"_id" : "1", "_index" : "test"} } { "doc" : {"field2" : "value2"} } -------------------------------------------------- -// CONSOLE The result of this bulk operation is: @@ -261,7 +260,7 @@ The `update` action payload supports the following options: `doc` script), `lang` (for script), and `_source`. See update documentation for details on the options. Example with update actions: -[source,js] +[source,console] -------------------------------------------------- POST _bulk { "update" : {"_id" : "1", "_index" : "index1", "retry_on_conflict" : 3} } @@ -275,7 +274,6 @@ POST _bulk { "update" : {"_id" : "4", "_index" : "index1"} } { "doc" : {"field" : "value"}, "_source": true} -------------------------------------------------- -// CONSOLE // TEST[continued] [float] diff --git a/docs/reference/docs/concurrency-control.asciidoc b/docs/reference/docs/concurrency-control.asciidoc index a7c38f6c222..facd68532c3 100644 --- a/docs/reference/docs/concurrency-control.asciidoc +++ b/docs/reference/docs/concurrency-control.asciidoc @@ -20,7 +20,7 @@ a change that has a smaller sequence number assigned to it. For example, the following indexing command will create a document and assign it an initial sequence number and primary term: -[source,js] +[source,console] -------------------------------------------------- PUT products/_doc/1567 { @@ -28,7 +28,6 @@ PUT products/_doc/1567 "details" : "A resourceful astromech droid" } -------------------------------------------------- -// CONSOLE You can see the assigned sequence number and primary term in the `_seq_no` and `_primary_term` fields of the response: @@ -58,11 +57,10 @@ operation to have changed each of the documents it stores. 
The sequence number and primary term are returned in the `_seq_no` and `_primary_term` fields in the response of the <>: -[source,js] +[source,console] -------------------------------------------------- GET products/_doc/1567 -------------------------------------------------- -// CONSOLE // TEST[continued] returns: @@ -99,7 +97,7 @@ For example, the following indexing call will make sure to add a tag to the document without losing any potential change to the description or an addition of another tag by another API: -[source,js] +[source,console] -------------------------------------------------- PUT products/_doc/1567?if_seq_no=362&if_primary_term=2 { @@ -108,7 +106,6 @@ PUT products/_doc/1567?if_seq_no=362&if_primary_term=2 "tags": ["droid"] } -------------------------------------------------- -// CONSOLE // TEST[continued] // TEST[catch: conflict] diff --git a/docs/reference/docs/delete-by-query.asciidoc b/docs/reference/docs/delete-by-query.asciidoc index bded6cc1812..dd870cef0b1 100644 --- a/docs/reference/docs/delete-by-query.asciidoc +++ b/docs/reference/docs/delete-by-query.asciidoc @@ -6,7 +6,7 @@ Deletes documents that match the specified query. -[source,js] +[source,console] -------------------------------------------------- POST /twitter/_delete_by_query { @@ -17,7 +17,6 @@ POST /twitter/_delete_by_query } } -------------------------------------------------- -// CONSOLE // TEST[setup:big_twitter] [[docs-delete-by-query-api-request]] @@ -243,7 +242,7 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=wait_for_active_shards] ////////////////////////// -[source,js] +[source,console] -------------------------------------------------- POST /twitter/_delete_by_query { @@ -254,7 +253,6 @@ POST /twitter/_delete_by_query } } -------------------------------------------------- -// CONSOLE // TEST[setup:big_twitter] ////////////////////////// @@ -348,7 +346,7 @@ version conflicts. Delete all tweets from the `twitter` index: -[source,js] +[source,console] -------------------------------------------------- POST twitter/_delete_by_query?conflicts=proceed { @@ -357,12 +355,11 @@ POST twitter/_delete_by_query?conflicts=proceed } } -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] Delete documents from multiple indices: -[source,js] +[source,console] -------------------------------------------------- POST /twitter,blog/_delete_by_query { @@ -371,13 +368,12 @@ POST /twitter,blog/_delete_by_query } } -------------------------------------------------- -// CONSOLE // TEST[s/^/PUT twitter\nPUT blog\n/] Limit the delete by query operation to shards that a particular routing value: -[source,js] +[source,console] -------------------------------------------------- POST twitter/_delete_by_query?routing=1 { @@ -390,13 +386,12 @@ POST twitter/_delete_by_query?routing=1 } } -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] By default `_delete_by_query` uses scroll batches of 1000. 
You can change the batch size with the `scroll_size` URL parameter: -[source,js] +[source,console] -------------------------------------------------- POST twitter/_delete_by_query?scroll_size=5000 { @@ -407,7 +402,6 @@ POST twitter/_delete_by_query?scroll_size=5000 } } -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] [float] @@ -417,7 +411,7 @@ POST twitter/_delete_by_query?scroll_size=5000 Slice a delete by query manually by providing a slice id and total number of slices: -[source,js] +[source,console] ---------------------------------------------------------------- POST twitter/_delete_by_query { @@ -448,12 +442,11 @@ POST twitter/_delete_by_query } } ---------------------------------------------------------------- -// CONSOLE // TEST[setup:big_twitter] Which you can verify works with: -[source,js] +[source,console] ---------------------------------------------------------------- GET _refresh POST twitter/_search?size=0&filter_path=hits.total @@ -467,7 +460,6 @@ POST twitter/_search?size=0&filter_path=hits.total } } ---------------------------------------------------------------- -// CONSOLE // TEST[continued] Which results in a sensible `total` like this one: @@ -492,7 +484,7 @@ You can also let delete-by-query automatically parallelize using <> to slice on `_id`. Use `slices` to specify the number of slices to use: -[source,js] +[source,console] ---------------------------------------------------------------- POST twitter/_delete_by_query?refresh&slices=5 { @@ -505,12 +497,11 @@ POST twitter/_delete_by_query?refresh&slices=5 } } ---------------------------------------------------------------- -// CONSOLE // TEST[setup:big_twitter] Which you also can verify works with: -[source,js] +[source,console] ---------------------------------------------------------------- POST twitter/_search?size=0&filter_path=hits.total { @@ -523,7 +514,6 @@ POST twitter/_search?size=0&filter_path=hits.total } } ---------------------------------------------------------------- -// CONSOLE // TEST[continued] Which results in a sensible `total` like this one: @@ -579,11 +569,10 @@ query takes effect immediately but rethrotting that slows down the query takes effect after completing the current batch to prevent scroll timeouts. -[source,js] +[source,console] -------------------------------------------------- POST _delete_by_query/r1A2WoRbTwKZ516z6NEs5A:36619/_rethrottle?requests_per_second=-1 -------------------------------------------------- -// CONSOLE Use the <> to get the task ID. Set `requests_per_second` to any positive decimal value or `-1` to disable throttling. @@ -594,11 +583,10 @@ Use the <> to get the status of a delete by query operation: -[source,js] +[source,console] -------------------------------------------------- GET _tasks?detailed=true&actions=*/delete/byquery -------------------------------------------------- -// CONSOLE // TEST[skip:No tasks to retrieve] The response looks like: @@ -649,11 +637,10 @@ will finish when their sum is equal to the `total` field. With the task id you can look up the task directly: -[source,js] +[source,console] -------------------------------------------------- GET /_tasks/r1A2WoRbTwKZ516z6NEs5A:36619 -------------------------------------------------- -// CONSOLE // TEST[catch:missing] The advantage of this API is that it integrates with `wait_for_completion=false` @@ -670,11 +657,10 @@ you to delete that document. 
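Running the delete by query with `wait_for_completion=false` returns a task ID immediately instead of waiting for the operation to finish; you can then feed that ID to the task APIs shown above. A minimal sketch, reusing the `twitter` index and a placeholder query from the earlier examples:

[source,console]
--------------------------------------------------
POST /twitter/_delete_by_query?wait_for_completion=false&conflicts=proceed
{
  "query": {
    "match": {
      "user": "kimchy"
    }
  }
}
--------------------------------------------------
// TEST[skip:illustrative only]

The response contains a `task` field whose value has the `node_id:task_number` format and can be passed to `GET /_tasks/<task_id>` or to the cancel API described below.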
Any delete by query can be canceled using the <>: -[source,js] +[source,console] -------------------------------------------------- POST _tasks/r1A2WoRbTwKZ516z6NEs5A:36619/_cancel -------------------------------------------------- -// CONSOLE The task ID can be found using the <>. diff --git a/docs/reference/docs/delete.asciidoc b/docs/reference/docs/delete.asciidoc index 4d4a81e5825..479db4a453e 100644 --- a/docs/reference/docs/delete.asciidoc +++ b/docs/reference/docs/delete.asciidoc @@ -57,22 +57,20 @@ For example: //// Example to delete with routing -[source,js] +[source,console] -------------------------------------------------- PUT /twitter/_doc/1?routing=kimchy { "test": "test" } -------------------------------------------------- -// CONSOLE //// -[source,js] +[source,console] -------------------------------------------------- DELETE /twitter/_doc/1?routing=kimchy -------------------------------------------------- -// CONSOLE // TEST[continued] This request deletes the tweet with id `1`, but it is routed based on the @@ -125,11 +123,10 @@ and responding with an error. The `timeout` parameter can be used to explicitly specify how long it waits. Here is an example of setting it to 5 minutes: -[source,js] +[source,console] -------------------------------------------------- DELETE /twitter/_doc/1?timeout=5m -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] [[docs-delete-api-path-params]] @@ -167,11 +164,10 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=wait_for_active_shards] Delete the JSON document `1` from the `twitter` index: -[source,js] +[source,console] -------------------------------------------------- DELETE /twitter/_doc/1 -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] The API returns the following result: diff --git a/docs/reference/docs/get.asciidoc b/docs/reference/docs/get.asciidoc index bee7e3700a9..bf1cd8d8179 100644 --- a/docs/reference/docs/get.asciidoc +++ b/docs/reference/docs/get.asciidoc @@ -43,11 +43,10 @@ By default, the get operation returns the contents of the `_source` field unless you have used the `stored_fields` parameter or if the `_source` field is disabled. You can turn off `_source` retrieval by using the `_source` parameter: -[source,js] +[source,console] -------------------------------------------------- GET twitter/_doc/0?_source=false -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] If you only need one or two fields from the `_source`, use the `_source_includes` @@ -56,20 +55,18 @@ This can be especially helpful with large documents where partial retrieval can save on network overhead. Both parameters take a comma separated list of fields or wildcard expressions. Example: -[source,js] +[source,console] -------------------------------------------------- GET twitter/_doc/0?_source_includes=*.id&_source_excludes=entities -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] If you only want to specify includes, you can use a shorter notation: -[source,js] +[source,console] -------------------------------------------------- GET twitter/_doc/0?_source=*.id,retweeted -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] [float] @@ -79,11 +76,10 @@ GET twitter/_doc/0?_source=*.id,retweeted If routing is used during indexing, the routing value also needs to be specified to retrieve a document. 
For example: -[source,js] +[source,console] -------------------------------------------------- GET twitter/_doc/2?routing=user1 -------------------------------------------------- -// CONSOLE // TEST[continued] This request gets the tweet with id `2`, but it is routed based on the @@ -231,11 +227,10 @@ If the `stored_fields` parameter is set to `true` and `found` is Retrieve the JSON document with the `_id` 0 from the `twitter` index: -[source,js] +[source,console] -------------------------------------------------- GET twitter/_doc/0 -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] The API returns the following result: @@ -262,11 +257,10 @@ The API returns the following result: Check to see if a document with the `_id` 0 exists: -[source,js] +[source,console] -------------------------------------------------- HEAD twitter/_doc/0 -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] {es} returns a status code of `200 - OK` if the document exists, or @@ -279,32 +273,29 @@ HEAD twitter/_doc/0 Use the `/_source/` resource to get just the `_source` field of a document. For example: -[source,js] +[source,console] -------------------------------------------------- GET twitter/_source/1 -------------------------------------------------- -// CONSOLE // TEST[continued] You can use the source filtering parameters to control which parts of the `_source` are returned: -[source,js] +[source,console] -------------------------------------------------- GET twitter/_source/1/?_source_includes=*.id&_source_excludes=entities -------------------------------------------------- -// CONSOLE // TEST[continued] You can use HEAD with the `_source` endpoint to efficiently test whether or not the document _source exists. A document's source is not available if it is disabled in the <>. -[source,js] +[source,console] -------------------------------------------------- HEAD twitter/_source/1 -------------------------------------------------- -// CONSOLE // TEST[continued] [float] @@ -315,7 +306,7 @@ Use the `stored_fields` parameter to specify the set of stored fields you want to retrieve. Any requested fields that are not stored are ignored. Consider for instance the following mapping: -[source,js] +[source,console] -------------------------------------------------- PUT twitter { @@ -333,11 +324,10 @@ PUT twitter } } -------------------------------------------------- -// CONSOLE Now we can add a document: -[source,js] +[source,console] -------------------------------------------------- PUT twitter/_doc/1 { @@ -345,16 +335,14 @@ PUT twitter/_doc/1 "tags" : ["red"] } -------------------------------------------------- -// CONSOLE // TEST[continued] And then try to retrieve it: -[source,js] +[source,console] -------------------------------------------------- GET twitter/_doc/1?stored_fields=tags,counter -------------------------------------------------- -// CONSOLE // TEST[continued] The API returns the following result: @@ -383,7 +371,7 @@ Since the `counter` field is not stored, the get request ignores it. 
You can also retrieve metadata fields like the `_routing` field: -[source,js] +[source,console] -------------------------------------------------- PUT twitter/_doc/2?routing=user1 { @@ -391,14 +379,12 @@ PUT twitter/_doc/2?routing=user1 "tags" : ["white"] } -------------------------------------------------- -// CONSOLE // TEST[continued] -[source,js] +[source,console] -------------------------------------------------- GET twitter/_doc/2?routing=user1&stored_fields=tags,counter -------------------------------------------------- -// CONSOLE // TEST[continued] The API returns the following result: diff --git a/docs/reference/docs/index_.asciidoc b/docs/reference/docs/index_.asciidoc index e95f2ba4b81..02abf691304 100644 --- a/docs/reference/docs/index_.asciidoc +++ b/docs/reference/docs/index_.asciidoc @@ -190,7 +190,7 @@ If you don't specify a document ID when using POST, the `op_type` is automatically set to `create` and the index operation generates a unique ID for the document. -[source,js] +[source,console] -------------------------------------------------- POST twitter/_doc/ { @@ -199,7 +199,6 @@ POST twitter/_doc/ "message" : "trying out Elasticsearch" } -------------------------------------------------- -// CONSOLE The API returns the following result: @@ -241,7 +240,7 @@ hash of the document's id value. For more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the `routing` parameter. For example: -[source,js] +[source,console] -------------------------------------------------- POST twitter/_doc?routing=kimchy { @@ -250,7 +249,6 @@ POST twitter/_doc?routing=kimchy "message" : "trying out Elasticsearch" } -------------------------------------------------- -// CONSOLE In this example, the document is routed to a shard based on the `routing` parameter provided: "kimchy". @@ -364,7 +362,7 @@ and responding with an error. The `timeout` parameter can be used to explicitly specify how long it waits. Here is an example of setting it to 5 minutes: -[source,js] +[source,console] -------------------------------------------------- PUT twitter/_doc/1?timeout=5m { @@ -373,7 +371,6 @@ PUT twitter/_doc/1?timeout=5m "message" : "trying out Elasticsearch" } -------------------------------------------------- -// CONSOLE [float] [[index-versioning]] @@ -394,14 +391,13 @@ indexed and the new version number used. If the value provided is less than or equal to the stored document's version number, a version conflict will occur and the index operation will fail. For example: -[source,js] +[source,console] -------------------------------------------------- PUT twitter/_doc/1?version=2&version_type=external { "message" : "elasticsearch now has versioning support, double cool!" } -------------------------------------------------- -// CONSOLE // TEST[continued] NOTE: Versioning is completely real time, and is not affected by the @@ -453,7 +449,7 @@ primary and replica shards to diverge. 
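The surrounding discussion concerns how many shard copies must be active before a write proceeds. A minimal sketch of the `wait_for_active_shards` parameter on an index request, where the value `2` (primary plus one replica) and the `twitter` index are only illustrative:

[source,console]
--------------------------------------------------
POST twitter/_doc?wait_for_active_shards=2
{
  "user" : "kimchy",
  "message" : "trying out Elasticsearch"
}
--------------------------------------------------
// TEST[skip:illustrative only]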
Insert a JSON document into the `twitter` index with an `_id` of 1: -[source,js] +[source,console] -------------------------------------------------- PUT twitter/_doc/1 { @@ -462,7 +458,6 @@ PUT twitter/_doc/1 "message" : "trying out Elasticsearch" } -------------------------------------------------- -// CONSOLE The API returns the following result: @@ -488,7 +483,7 @@ The API returns the following result: Use the `_create` resource to index a document into the `twitter` index if no document with that ID exists: -[source,js] +[source,console] -------------------------------------------------- PUT twitter/_create/1 { @@ -497,12 +492,11 @@ PUT twitter/_create/1 "message" : "trying out Elasticsearch" } -------------------------------------------------- -// CONSOLE Set the `op_type` parameter to _create_ to index a document into the `twitter` index if no document with that ID exists: -[source,js] +[source,console] -------------------------------------------------- PUT twitter/_doc/1?op_type=create { @@ -511,4 +505,3 @@ PUT twitter/_doc/1?op_type=create "message" : "trying out Elasticsearch" } -------------------------------------------------- -// CONSOLE diff --git a/docs/reference/docs/multi-get.asciidoc b/docs/reference/docs/multi-get.asciidoc index 28b76dbe42c..ccf91e172e4 100644 --- a/docs/reference/docs/multi-get.asciidoc +++ b/docs/reference/docs/multi-get.asciidoc @@ -11,7 +11,7 @@ successful get is similar in structure to a document provided by the Here is an example: -[source,js] +[source,console] -------------------------------------------------- GET /_mget { @@ -29,12 +29,11 @@ GET /_mget ] } -------------------------------------------------- -// CONSOLE The `mget` endpoint can also be used against an index (in which case it is not required in the body): -[source,js] +[source,console] -------------------------------------------------- GET /test/_mget { @@ -50,7 +49,6 @@ GET /test/_mget ] } -------------------------------------------------- -// CONSOLE And type: @@ -73,14 +71,13 @@ GET /test/_doc/_mget In which case, the `ids` element can directly be used to simplify the request: -[source,js] +[source,console] -------------------------------------------------- GET /test/_doc/_mget { "ids" : ["1", "2"] } -------------------------------------------------- -// CONSOLE [float] [[mget-source-filtering]] @@ -94,7 +91,7 @@ which will be used when there are no per-document instructions. For example: -[source,js] +[source,console] -------------------------------------------------- GET /_mget { @@ -123,7 +120,6 @@ GET /_mget ] } -------------------------------------------------- -// CONSOLE [float] @@ -133,7 +129,7 @@ GET /_mget Specific stored fields can be specified to be retrieved per document to get, similar to the <> parameter of the Get API. For example: -[source,js] +[source,console] -------------------------------------------------- GET /_mget { @@ -153,12 +149,11 @@ GET /_mget ] } -------------------------------------------------- -// CONSOLE Alternatively, you can specify the `stored_fields` parameter in the query string as a default to be applied to all documents. 
-[source,js] +[source,console] -------------------------------------------------- GET /test/_doc/_mget?stored_fields=field1,field2 { @@ -173,7 +168,6 @@ GET /test/_doc/_mget?stored_fields=field1,field2 ] } -------------------------------------------------- -// CONSOLE <1> Returns `field1` and `field2` <2> Returns `field3` and `field4` @@ -183,7 +177,7 @@ GET /test/_doc/_mget?stored_fields=field1,field2 You can also specify a routing value as a parameter: -[source,js] +[source,console] -------------------------------------------------- GET /_mget?routing=key1 { @@ -202,7 +196,6 @@ GET /_mget?routing=key1 ] } -------------------------------------------------- -// CONSOLE In this example, document `test/_doc/2` will be fetched from the shard corresponding to routing key `key1` but document `test/_doc/1` will be fetched from the shard corresponding to routing key `key2`. diff --git a/docs/reference/docs/multi-termvectors.asciidoc b/docs/reference/docs/multi-termvectors.asciidoc index df00b39ef42..d4749ec68b8 100644 --- a/docs/reference/docs/multi-termvectors.asciidoc +++ b/docs/reference/docs/multi-termvectors.asciidoc @@ -10,7 +10,7 @@ array with all the fetched termvectors, each element having the structure provided by the <> API. Here is an example: -[source,js] +[source,console] -------------------------------------------------- POST /_mtermvectors { @@ -30,7 +30,6 @@ POST /_mtermvectors ] } -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] See the <> API for a description of possible parameters. @@ -38,7 +37,7 @@ See the <> API for a description of possible param The `_mtermvectors` endpoint can also be used against an index (in which case it is not required in the body): -[source,js] +[source,console] -------------------------------------------------- POST /twitter/_mtermvectors { @@ -56,12 +55,11 @@ POST /twitter/_mtermvectors ] } -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] If all requested documents are on same index and also the parameters are the same, the request can be simplified: -[source,js] +[source,console] -------------------------------------------------- POST /twitter/_mtermvectors { @@ -74,14 +72,13 @@ POST /twitter/_mtermvectors } } -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] Additionally, just like for the <> API, term vectors could be generated for user provided documents. The mapping used is determined by `_index`. -[source,js] +[source,console] -------------------------------------------------- POST /_mtermvectors { @@ -103,5 +100,4 @@ POST /_mtermvectors ] } -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] diff --git a/docs/reference/docs/refresh.asciidoc b/docs/reference/docs/refresh.asciidoc index 127ec9fc6fd..c5b19903dac 100644 --- a/docs/reference/docs/refresh.asciidoc +++ b/docs/reference/docs/refresh.asciidoc @@ -83,32 +83,29 @@ many times they modify the shard. 
These will create a document and immediately refresh the index so it is visible: -[source,js] +[source,console] -------------------------------------------------- PUT /test/_doc/1?refresh {"test": "test"} PUT /test/_doc/2?refresh=true {"test": "test"} -------------------------------------------------- -// CONSOLE These will create a document without doing anything to make it visible for search: -[source,js] +[source,console] -------------------------------------------------- PUT /test/_doc/3 {"test": "test"} PUT /test/_doc/4?refresh=false {"test": "test"} -------------------------------------------------- -// CONSOLE This will create a document and wait for it to become visible for search: -[source,js] +[source,console] -------------------------------------------------- PUT /test/_doc/4?refresh=wait_for {"test": "test"} -------------------------------------------------- -// CONSOLE diff --git a/docs/reference/docs/reindex.asciidoc b/docs/reference/docs/reindex.asciidoc index 2db9d769fa7..bda2138f9f5 100644 --- a/docs/reference/docs/reindex.asciidoc +++ b/docs/reference/docs/reindex.asciidoc @@ -12,7 +12,7 @@ counts, replicas, etc. The most basic form of `_reindex` just copies documents from one index to another. This will copy documents from the `twitter` index into the `new_twitter` index: -[source,js] +[source,console] -------------------------------------------------- POST _reindex { @@ -24,7 +24,6 @@ POST _reindex } } -------------------------------------------------- -// CONSOLE // TEST[setup:big_twitter] That will return something like this: @@ -61,7 +60,7 @@ index API to control optimistic concurrency control. Just leaving out to blindly dump documents into the target, overwriting any that happen to have the same type and id: -[source,js] +[source,console] -------------------------------------------------- POST _reindex { @@ -74,7 +73,6 @@ POST _reindex } } -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] Setting `version_type` to `external` will cause Elasticsearch to preserve the @@ -82,7 +80,7 @@ Setting `version_type` to `external` will cause Elasticsearch to preserve the any documents that have an older version in the destination index than they do in the source index: -[source,js] +[source,console] -------------------------------------------------- POST _reindex { @@ -95,14 +93,13 @@ POST _reindex } } -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] Settings `op_type` to `create` will cause `_reindex` to only create missing documents in the target index. All existing documents will cause a version conflict: -[source,js] +[source,console] -------------------------------------------------- POST _reindex { @@ -115,7 +112,6 @@ POST _reindex } } -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] By default, version conflicts abort the `_reindex` process. The `"conflicts"` request body @@ -124,7 +120,7 @@ It is important to note that the handling of other error types is unaffected by When `"conflicts": "proceed"` is set in the request body, the `_reindex` process will continue on version conflicts and return a count of version conflicts encountered: -[source,js] +[source,console] -------------------------------------------------- POST _reindex { @@ -138,13 +134,12 @@ POST _reindex } } -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] You can limit the documents by adding a query to the `source`. 
This will only copy tweets made by `kimchy` into `new_twitter`: -[source,js] +[source,console] -------------------------------------------------- POST _reindex { @@ -161,14 +156,13 @@ POST _reindex } } -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] `index` in `source` can be a list, allowing you to copy from lots of sources in one request. This will copy documents from the `twitter` and `blog` indices: -[source,js] +[source,console] -------------------------------------------------- POST _reindex { @@ -180,7 +174,6 @@ POST _reindex } } -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] // TEST[s/^/PUT blog\/post\/post1?refresh\n{"test": "foo"}\n/] @@ -193,7 +186,7 @@ It's also possible to limit the number of processed documents by setting `max_docs`. This will only copy a single document from `twitter` to `new_twitter`: -[source,js] +[source,console] -------------------------------------------------- POST _reindex { @@ -206,7 +199,6 @@ POST _reindex } } -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] If you want a particular set of documents from the `twitter` index you'll @@ -214,7 +206,7 @@ need to use `sort`. Sorting makes the scroll less efficient but in some contexts it's worth it. If possible, prefer a more selective query to `max_docs` and `sort`. This will copy 10000 documents from `twitter` into `new_twitter`: -[source,js] +[source,console] -------------------------------------------------- POST _reindex { @@ -228,7 +220,6 @@ POST _reindex } } -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] The `source` section supports all the elements that are supported in a @@ -236,7 +227,7 @@ The `source` section supports all the elements that are supported in a fields from the original documents can be reindexed using `source` filtering as follows: -[source,js] +[source,console] -------------------------------------------------- POST _reindex { @@ -249,7 +240,6 @@ POST _reindex } } -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] [[reindex-scripts]] @@ -257,7 +247,7 @@ Like `_update_by_query`, `_reindex` supports a script that modifies the document. Unlike `_update_by_query`, the script is allowed to modify the document's metadata. This example bumps the version of the source document: -[source,js] +[source,console] -------------------------------------------------- POST _reindex { @@ -274,7 +264,6 @@ POST _reindex } } -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] Just as in `_update_by_query`, you can set `ctx.op` to change the @@ -330,7 +319,7 @@ For example, you can use the following request to copy all documents from the `source` index with the company name `cat` into the `dest` index with routing set to `cat`. -[source,js] +[source,console] -------------------------------------------------- POST _reindex { @@ -348,13 +337,12 @@ POST _reindex } } -------------------------------------------------- -// CONSOLE // TEST[s/^/PUT source\n/] By default `_reindex` uses scroll batches of 1000. 
You can change the batch size with the `size` field in the `source` element: -[source,js] +[source,console] -------------------------------------------------- POST _reindex { @@ -368,13 +356,12 @@ POST _reindex } } -------------------------------------------------- -// CONSOLE // TEST[s/^/PUT source\n/] Reindex can also use the <> feature by specifying a `pipeline` like this: -[source,js] +[source,console] -------------------------------------------------- POST _reindex { @@ -387,7 +374,6 @@ POST _reindex } } -------------------------------------------------- -// CONSOLE // TEST[s/^/PUT source\n/] [float] @@ -396,7 +382,7 @@ POST _reindex Reindex supports reindexing from a remote Elasticsearch cluster: -[source,js] +[source,console] -------------------------------------------------- POST _reindex { @@ -418,7 +404,6 @@ POST _reindex } } -------------------------------------------------- -// CONSOLE // TEST[setup:host] // TEST[s/^/PUT source\n/] // TEST[s/otherhost:9200",/\${host}"/] @@ -464,7 +449,7 @@ maximum size of 100mb. If the remote index includes very large documents you'll need to use a smaller batch size. The example below sets the batch size to `10` which is very, very small. -[source,js] +[source,console] -------------------------------------------------- POST _reindex { @@ -485,7 +470,6 @@ POST _reindex } } -------------------------------------------------- -// CONSOLE // TEST[setup:host] // TEST[s/^/PUT source\n/] // TEST[s/otherhost:9200/\${host}/] @@ -496,7 +480,7 @@ with the `socket_timeout` field and the connection timeout with the sets the socket read timeout to one minute and the connection timeout to 10 seconds: -[source,js] +[source,console] -------------------------------------------------- POST _reindex { @@ -518,7 +502,6 @@ POST _reindex } } -------------------------------------------------- -// CONSOLE // TEST[setup:host] // TEST[s/^/PUT source\n/] // TEST[s/otherhost:9200/\${host}/] @@ -670,7 +653,7 @@ starting the next set. This is "bursty" instead of "smooth". The default value i ==== Response body ////////////////////////// -[source,js] +[source,console] -------------------------------------------------- POST /_reindex?wait_for_completion { @@ -682,7 +665,6 @@ POST /_reindex?wait_for_completion } } -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] ////////////////////////// @@ -786,11 +768,10 @@ the `conflicts` option to prevent reindex from aborting on version conflicts. You can fetch the status of all running reindex requests with the <>: -[source,js] +[source,console] -------------------------------------------------- GET _tasks?detailed=true&actions=*reindex -------------------------------------------------- -// CONSOLE // TEST[skip:No tasks to retrieve] The response looks like: @@ -851,11 +832,10 @@ will finish when their sum is equal to the `total` field. With the task id you can look up the task directly. The following example retrieves information about the task `r1A2WoRbTwKZ516z6NEs5A:36619`: -[source,js] +[source,console] -------------------------------------------------- GET /_tasks/r1A2WoRbTwKZ516z6NEs5A:36619 -------------------------------------------------- -// CONSOLE // TEST[catch:missing] The advantage of this API is that it integrates with `wait_for_completion=false` @@ -873,11 +853,10 @@ you to delete that document. Any reindex can be canceled using the <>. 
For example: -[source,js] +[source,console] -------------------------------------------------- POST _tasks/r1A2WoRbTwKZ516z6NEs5A:36619/_cancel -------------------------------------------------- -// CONSOLE The task ID can be found using the <>. @@ -892,11 +871,10 @@ API will continue to list the task until it wakes to cancel itself. The value of `requests_per_second` can be changed on a running reindex using the `_rethrottle` API: -[source,js] +[source,console] -------------------------------------------------- POST _reindex/r1A2WoRbTwKZ516z6NEs5A:36619/_rethrottle?requests_per_second=-1 -------------------------------------------------- -// CONSOLE The task ID can be found using the <>. @@ -914,7 +892,7 @@ timeouts. `_reindex` can be used to build a copy of an index with renamed fields. Say you create an index containing documents that look like this: -[source,js] +[source,console] -------------------------------------------------- POST test/_doc/1?refresh { @@ -922,12 +900,11 @@ POST test/_doc/1?refresh "flag": "foo" } -------------------------------------------------- -// CONSOLE but you don't like the name `flag` and want to replace it with `tag`. `_reindex` can create the other index for you: -[source,js] +[source,console] -------------------------------------------------- POST _reindex { @@ -942,16 +919,14 @@ POST _reindex } } -------------------------------------------------- -// CONSOLE // TEST[continued] Now you can get the new document: -[source,js] +[source,console] -------------------------------------------------- GET test2/_doc/1 -------------------------------------------------- -// CONSOLE // TEST[continued] which will return: @@ -988,7 +963,7 @@ break the request down into smaller parts. Slice a reindex request manually by providing a slice id and total number of slices to each request: -[source,js] +[source,console] ---------------------------------------------------------------- POST _reindex { @@ -1017,17 +992,15 @@ POST _reindex } } ---------------------------------------------------------------- -// CONSOLE // TEST[setup:big_twitter] You can verify this works by: -[source,js] +[source,console] ---------------------------------------------------------------- GET _refresh POST new_twitter/_search?size=0&filter_path=hits.total ---------------------------------------------------------------- -// CONSOLE // TEST[continued] which results in a sensible `total` like this one: @@ -1051,7 +1024,7 @@ which results in a sensible `total` like this one: You can also let `_reindex` automatically parallelize using <> to slice on `_uid`. Use `slices` to specify the number of slices to use: -[source,js] +[source,console] ---------------------------------------------------------------- POST _reindex?slices=5&refresh { @@ -1063,16 +1036,14 @@ POST _reindex?slices=5&refresh } } ---------------------------------------------------------------- -// CONSOLE // TEST[setup:big_twitter] You can also this verify works by: -[source,js] +[source,console] ---------------------------------------------------------------- POST new_twitter/_search?size=0&filter_path=hits.total ---------------------------------------------------------------- -// CONSOLE // TEST[continued] which results in a sensible `total` like this one: @@ -1172,14 +1143,13 @@ a new template to the existing documents. 
Assuming you have indices consisting of documents as follows: -[source,js] +[source,console] ---------------------------------------------------------------- PUT metricbeat-2016.05.30/_doc/1?refresh {"system.cpu.idle.pct": 0.908} PUT metricbeat-2016.05.31/_doc/1?refresh {"system.cpu.idle.pct": 0.105} ---------------------------------------------------------------- -// CONSOLE The new template for the `metricbeat-*` indices is already loaded into Elasticsearch, but it applies only to the newly created indices. Painless can be used to reindex @@ -1189,7 +1159,7 @@ The script below extracts the date from the index name and creates a new index with `-1` appended. All data from `metricbeat-2016.05.31` will be reindexed into `metricbeat-2016.05.31-1`. -[source,js] +[source,console] ---------------------------------------------------------------- POST _reindex { @@ -1205,17 +1175,15 @@ POST _reindex } } ---------------------------------------------------------------- -// CONSOLE // TEST[continued] All documents from the previous metricbeat indices can now be found in the `*-1` indices. -[source,js] +[source,console] ---------------------------------------------------------------- GET metricbeat-2016.05.30-1/_doc/1 GET metricbeat-2016.05.31-1/_doc/1 ---------------------------------------------------------------- -// CONSOLE // TEST[continued] The previous method can also be used in conjunction with <> @@ -1226,7 +1194,7 @@ to load only the existing data into the new index and rename any fields if neede `_reindex` can be used to extract a random subset of an index for testing: -[source,js] +[source,console] ---------------------------------------------------------------- POST _reindex { @@ -1246,7 +1214,6 @@ POST _reindex } } ---------------------------------------------------------------- -// CONSOLE // TEST[setup:big_twitter] <1> `_reindex` defaults to sorting by `_doc` so `random_score` will not have any diff --git a/docs/reference/docs/termvectors.asciidoc b/docs/reference/docs/termvectors.asciidoc index ef2aca39c92..a9c8414fd71 100644 --- a/docs/reference/docs/termvectors.asciidoc +++ b/docs/reference/docs/termvectors.asciidoc @@ -6,21 +6,19 @@ document. The document could be stored in the index or artificially provided by the user. Term vectors are <> by default, not near realtime. This can be changed by setting `realtime` parameter to `false`. -[source,js] +[source,console] -------------------------------------------------- GET /twitter/_termvectors/1 -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] Optionally, you can specify the fields for which the information is retrieved either with a parameter in the url -[source,js] +[source,console] -------------------------------------------------- GET /twitter/_termvectors/1?fields=message -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] or by adding the requested fields in the request body (see @@ -123,7 +121,7 @@ from is randomly selected. Use `routing` only to hit a particular shard. First, we create an index that stores term vectors, payloads etc. : -[source,js] +[source,console] -------------------------------------------------- PUT /twitter { "mappings": { @@ -161,11 +159,10 @@ PUT /twitter } } -------------------------------------------------- -// CONSOLE Second, we add some documents: -[source,js] +[source,console] -------------------------------------------------- PUT /twitter/_doc/1 { @@ -179,13 +176,12 @@ PUT /twitter/_doc/2 "text" : "Another twitter test ..." 
} -------------------------------------------------- -// CONSOLE // TEST[continued] The following request returns all information and statistics for field `text` in document `1` (John Doe): -[source,js] +[source,console] -------------------------------------------------- GET /twitter/_termvectors/1 { @@ -197,7 +193,6 @@ GET /twitter/_termvectors/1 "field_statistics" : true } -------------------------------------------------- -// CONSOLE // TEST[continued] Response: @@ -273,7 +268,7 @@ computed on the fly. The following request returns all information and statistic fields in document `1`, even though the terms haven't been explicitly stored in the index. Note that for the field `text`, the terms are not re-generated. -[source,js] +[source,console] -------------------------------------------------- GET /twitter/_termvectors/1 { @@ -284,7 +279,6 @@ GET /twitter/_termvectors/1 "field_statistics" : true } -------------------------------------------------- -// CONSOLE // TEST[continued] [[docs-termvectors-artificial-doc]] @@ -298,7 +292,7 @@ return the same results as in example 1. The mapping used is determined by the ` *If dynamic mapping is turned on (default), the document fields not in the original mapping will be dynamically created.* -[source,js] +[source,console] -------------------------------------------------- GET /twitter/_termvectors { @@ -308,7 +302,6 @@ GET /twitter/_termvectors } } -------------------------------------------------- -// CONSOLE // TEST[continued] [[docs-termvectors-per-field-analyzer]] @@ -321,7 +314,7 @@ generate term vectors in any fashion, especially when using artificial documents. When providing an analyzer for a field that already stores term vectors, the term vectors will be re-generated. -[source,js] +[source,console] -------------------------------------------------- GET /twitter/_termvectors { @@ -335,7 +328,6 @@ GET /twitter/_termvectors } } -------------------------------------------------- -// CONSOLE // TEST[continued] Response: @@ -388,7 +380,7 @@ artificial document having the given "plot" field value. Notice that the keyword "Tony" or any stop words are not part of the response, as their tf-idf must be too low. -[source,js] +[source,console] -------------------------------------------------- GET /imdb/_termvectors { @@ -406,7 +398,6 @@ GET /imdb/_termvectors } } -------------------------------------------------- -// CONSOLE // TEST[skip:no imdb test index] Response: diff --git a/docs/reference/docs/update-by-query.asciidoc b/docs/reference/docs/update-by-query.asciidoc index c4972fa89bb..d85f76a6765 100644 --- a/docs/reference/docs/update-by-query.asciidoc +++ b/docs/reference/docs/update-by-query.asciidoc @@ -6,11 +6,10 @@ document in the index without changing the source. This is useful to <> or some other online mapping change. Here is the API: -[source,js] +[source,console] -------------------------------------------------- POST twitter/_update_by_query?conflicts=proceed -------------------------------------------------- -// CONSOLE // TEST[setup:big_twitter] That will return something like this: @@ -65,18 +64,17 @@ that update will have picked up the online mapping update. Back to the API format, this will update tweets from the `twitter` index: -[source,js] +[source,console] -------------------------------------------------- POST twitter/_update_by_query?conflicts=proceed -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] You can also limit `_update_by_query` using the <>. 
This will update all documents from the `twitter` index for the user `kimchy`: -[source,js] +[source,console] -------------------------------------------------- POST twitter/_update_by_query?conflicts=proceed { @@ -87,7 +85,6 @@ POST twitter/_update_by_query?conflicts=proceed } } -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] <1> The query must be passed as a value to the `query` key, in the same @@ -100,7 +97,7 @@ is genuinely useful for things like fun. `_update_by_query` <> to update the document. This will increment the `likes` field on all of kimchy's tweets: -[source,js] +[source,console] -------------------------------------------------- POST twitter/_update_by_query { @@ -115,7 +112,6 @@ POST twitter/_update_by_query } } -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] Just as in <> you can set `ctx.op` to change the @@ -148,37 +144,34 @@ from its original location. It's also possible to do this whole thing on multiple indexes at once, just like the search API: -[source,js] +[source,console] -------------------------------------------------- POST twitter,blog/_update_by_query -------------------------------------------------- -// CONSOLE // TEST[s/^/PUT twitter\nPUT blog\n/] If you provide `routing` then the routing is copied to the scroll query, limiting the process to the shards that match that routing value: -[source,js] +[source,console] -------------------------------------------------- POST twitter/_update_by_query?routing=1 -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] By default `_update_by_query` uses scroll batches of 1000. You can change the batch size with the `scroll_size` URL parameter: -[source,js] +[source,console] -------------------------------------------------- POST twitter/_update_by_query?scroll_size=100 -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] `_update_by_query` can also use the <> feature by specifying a `pipeline` like this: -[source,js] +[source,console] -------------------------------------------------- PUT _ingest/pipeline/set-foo { @@ -192,7 +185,6 @@ PUT _ingest/pipeline/set-foo } POST twitter/_update_by_query?pipeline=set-foo -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] [float] @@ -249,11 +241,10 @@ starting the next set. This is "bursty" instead of "smooth". The default is `-1` ==== Response body ////////////////////////// -[source,js] +[source,console] -------------------------------------------------- POST /twitter/_update_by_query?conflicts=proceed -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] ////////////////////////// @@ -356,11 +347,10 @@ version conflicts. You can fetch the status of all running update by query requests with the <>: -[source,js] +[source,console] -------------------------------------------------- GET _tasks?detailed=true&actions=*byquery -------------------------------------------------- -// CONSOLE // TEST[skip:No tasks to retrieve] The responses looks like: @@ -415,11 +405,10 @@ will finish when their sum is equal to the `total` field. With the task id you can look up the task directly. 
The following example retrieves information about task `r1A2WoRbTwKZ516z6NEs5A:36619`: -[source,js] +[source,console] -------------------------------------------------- GET /_tasks/r1A2WoRbTwKZ516z6NEs5A:36619 -------------------------------------------------- -// CONSOLE // TEST[catch:missing] The advantage of this API is that it integrates with `wait_for_completion=false` @@ -436,11 +425,10 @@ you to delete that document. Any update by query can be cancelled using the <>: -[source,js] +[source,console] -------------------------------------------------- POST _tasks/r1A2WoRbTwKZ516z6NEs5A:36619/_cancel -------------------------------------------------- -// CONSOLE The task ID can be found using the <>. @@ -456,11 +444,10 @@ that it has been cancelled and terminates itself. The value of `requests_per_second` can be changed on a running update by query using the `_rethrottle` API: -[source,js] +[source,console] -------------------------------------------------- POST _update_by_query/r1A2WoRbTwKZ516z6NEs5A:36619/_rethrottle?requests_per_second=-1 -------------------------------------------------- -// CONSOLE The task ID can be found using the <>. @@ -485,7 +472,7 @@ break the request down into smaller parts. Slice an update by query manually by providing a slice id and total number of slices to each request: -[source,js] +[source,console] ---------------------------------------------------------------- POST twitter/_update_by_query { @@ -508,17 +495,15 @@ POST twitter/_update_by_query } } ---------------------------------------------------------------- -// CONSOLE // TEST[setup:big_twitter] Which you can verify works with: -[source,js] +[source,console] ---------------------------------------------------------------- GET _refresh POST twitter/_search?size=0&q=extra:test&filter_path=hits.total ---------------------------------------------------------------- -// CONSOLE // TEST[continued] Which results in a sensible `total` like this one: @@ -543,7 +528,7 @@ You can also let update by query automatically parallelize using <> to slice on `_id`. Use `slices` to specify the number of slices to use: -[source,js] +[source,console] ---------------------------------------------------------------- POST twitter/_update_by_query?refresh&slices=5 { @@ -552,16 +537,14 @@ POST twitter/_update_by_query?refresh&slices=5 } } ---------------------------------------------------------------- -// CONSOLE // TEST[setup:big_twitter] Which you also can verify works with: -[source,js] +[source,console] ---------------------------------------------------------------- POST twitter/_search?size=0&q=extra:test&filter_path=hits.total ---------------------------------------------------------------- -// CONSOLE // TEST[continued] Which results in a sensible `total` like this one: @@ -634,7 +617,7 @@ documents being reindexed and cluster resources. Say you created an index without dynamic mapping, filled it with data, and then added a mapping value to pick up more fields from the data: -[source,js] +[source,console] -------------------------------------------------- PUT test { @@ -664,7 +647,6 @@ PUT test/_mapping <2> } } -------------------------------------------------- -// CONSOLE <1> This means that new fields won't be indexed, just stored in `_source`. @@ -673,7 +655,7 @@ field you have to reindex all documents with it. 
Searching for the data won't find anything: -[source,js] +[source,console] -------------------------------------------------- POST test/_search?filter_path=hits.total { @@ -684,7 +666,6 @@ POST test/_search?filter_path=hits.total } } -------------------------------------------------- -// CONSOLE // TEST[continued] [source,console-result] @@ -701,7 +682,7 @@ POST test/_search?filter_path=hits.total But you can issue an `_update_by_query` request to pick up the new mapping: -[source,js] +[source,console] -------------------------------------------------- POST test/_update_by_query?refresh&conflicts=proceed POST test/_search?filter_path=hits.total @@ -713,7 +694,6 @@ POST test/_search?filter_path=hits.total } } -------------------------------------------------- -// CONSOLE // TEST[continued] [source,console-result] diff --git a/docs/reference/docs/update.asciidoc b/docs/reference/docs/update.asciidoc index 386363393e7..3b6e94c7afa 100644 --- a/docs/reference/docs/update.asciidoc +++ b/docs/reference/docs/update.asciidoc @@ -80,7 +80,7 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=wait_for_active_shards] First, let's index a simple doc: -[source,js] +[source,console] -------------------------------------------------- PUT test/_doc/1 { @@ -88,12 +88,11 @@ PUT test/_doc/1 "tags" : ["red"] } -------------------------------------------------- -// CONSOLE To increment the counter, you can submit an update request with the following script: -[source,js] +[source,console] -------------------------------------------------- POST test/_update/1 { @@ -106,13 +105,12 @@ POST test/_update/1 } } -------------------------------------------------- -// CONSOLE // TEST[continued] Similarly, you could use and update script to add a tag to the list of tags (this is just a list, so the tag is added even it exists): -[source,js] +[source,console] -------------------------------------------------- POST test/_update/1 { @@ -125,7 +123,6 @@ POST test/_update/1 } } -------------------------------------------------- -// CONSOLE // TEST[continued] You could also remove a tag from the list of tags. The Painless @@ -134,7 +131,7 @@ you want to remove. To avoid a possible runtime error, you first need to make sure the tag exists. If the list contains duplicates of the tag, this script just removes one occurrence. -[source,js] +[source,console] -------------------------------------------------- POST test/_update/1 { @@ -147,39 +144,36 @@ POST test/_update/1 } } -------------------------------------------------- -// CONSOLE // TEST[continued] You can also add and remove fields from a document. For example, this script adds the field `new_field`: -[source,js] +[source,console] -------------------------------------------------- POST test/_update/1 { "script" : "ctx._source.new_field = 'value_of_new_field'" } -------------------------------------------------- -// CONSOLE // TEST[continued] Conversely, this script removes the field `new_field`: -[source,js] +[source,console] -------------------------------------------------- POST test/_update/1 { "script" : "ctx._source.remove('new_field')" } -------------------------------------------------- -// CONSOLE // TEST[continued] Instead of updating the document, you can also change the operation that is executed from within the script. 
For example, this request deletes the doc if the `tags` field contains `green`, otherwise it does nothing (`noop`): -[source,js] +[source,console] -------------------------------------------------- POST test/_update/1 { @@ -192,7 +186,6 @@ POST test/_update/1 } } -------------------------------------------------- -// CONSOLE // TEST[continued] [float] @@ -201,7 +194,7 @@ POST test/_update/1 The following partial update adds a new field to the existing document: -[source,js] +[source,console] -------------------------------------------------- POST test/_update/1 { @@ -210,7 +203,6 @@ POST test/_update/1 } } -------------------------------------------------- -// CONSOLE // TEST[continued] If both `doc` and `script` are specified, then `doc` is ignored. If you @@ -222,7 +214,7 @@ specify a scripted update, include the fields you want to update in the script. By default updates that don't change anything detect that they don't change anything and return `"result": "noop"`: -[source,js] +[source,console] -------------------------------------------------- POST test/_update/1 { @@ -231,7 +223,6 @@ POST test/_update/1 } } -------------------------------------------------- -// CONSOLE // TEST[continued] If the value of `name` is already `new_name`, the update @@ -257,7 +248,7 @@ request is ignored and the `result` element in the response returns `noop`: You can disable this behavior by setting `"detect_noop": false`: -[source,js] +[source,console] -------------------------------------------------- POST test/_update/1 { @@ -267,7 +258,6 @@ POST test/_update/1 "detect_noop": false } -------------------------------------------------- -// CONSOLE // TEST[continued] [[upserts]] @@ -278,7 +268,7 @@ If the document does not already exist, the contents of the `upsert` element are inserted as a new document. If the document exists, the `script` is executed: -[source,js] +[source,console] -------------------------------------------------- POST test/_update/1 { @@ -294,7 +284,6 @@ POST test/_update/1 } } -------------------------------------------------- -// CONSOLE // TEST[continued] [float] @@ -304,7 +293,7 @@ POST test/_update/1 To run the script whether or not the document exists, set `scripted_upsert` to `true`: -[source,js] +[source,console] -------------------------------------------------- POST sessions/_update/dh3sgudg8gsrgl { @@ -322,7 +311,6 @@ POST sessions/_update/dh3sgudg8gsrgl "upsert" : {} } -------------------------------------------------- -// CONSOLE // TEST[s/"id": "my_web_session_summariser"/"source": "ctx._source.page_view_event = params.pageViewEvent"/] // TEST[continued] @@ -334,7 +322,7 @@ Instead of sending a partial `doc` plus an `upsert` doc, you can set `doc_as_upsert` to `true` to use the contents of `doc` as the `upsert` value: -[source,js] +[source,console] -------------------------------------------------- POST test/_update/1 { @@ -344,5 +332,4 @@ POST test/_update/1 "doc_as_upsert" : true } -------------------------------------------------- -// CONSOLE // TEST[continued] diff --git a/docs/reference/frozen-indices.asciidoc b/docs/reference/frozen-indices.asciidoc index 60f07b58bcb..b6cafad30f5 100644 --- a/docs/reference/frozen-indices.asciidoc +++ b/docs/reference/frozen-indices.asciidoc @@ -62,11 +62,10 @@ It's highly recommended to <> your indices pri segment on disk. This not only provides much better compression but also simplifies the data structures needed to service aggregation or sorted search requests. 
-[source,js] +[source,console] -------------------------------------------------- POST /twitter/_forcemerge?max_num_segments=1 -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] [role="xpack"] @@ -80,11 +79,10 @@ Search requests will not be executed against frozen indices by default, even if to prevent accidental slowdowns by targeting a frozen index by mistake. To include frozen indices a search request must be executed with the query parameter `ignore_throttled=false`. -[source,js] +[source,console] -------------------------------------------------- GET /twitter/_search?q=user:kimchy&ignore_throttled=false -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] [IMPORTANT] @@ -104,11 +102,10 @@ significant overhead associated with this pre-filter phase. Frozen indices are ordinary indices that use search throttling and a memory efficient shard implementation. For API's like the <> frozen indices may identified by an index's `search.throttled` property (`sth`). -[source,js] +[source,console] -------------------------------------------------- GET /_cat/indices/twitter?v&h=i,sth -------------------------------------------------- -// CONSOLE // TEST[s/^/PUT twitter\nPOST twitter\/_freeze\n/] The response looks like: diff --git a/docs/reference/getting-started.asciidoc b/docs/reference/getting-started.asciidoc index 3bcb2e65541..165b769b6d7 100755 --- a/docs/reference/getting-started.asciidoc +++ b/docs/reference/getting-started.asciidoc @@ -165,11 +165,10 @@ TIP: You'll want to check out the https://www.elastic.co/guide/en/elasticsearch/client/index.html[{es} language clients] when you're ready to start using {es} in your own applications. + -[source,js] +[source,console] -------------------------------------------------- GET /_cat/health?v -------------------------------------------------- -// CONSOLE + The response should indicate that the status of the `elasticsearch` cluster is `green` and it has three nodes: @@ -211,14 +210,13 @@ You can do this directly with a simple PUT request that specifies the index you want to add the document, a unique document ID, and one or more `"field": "value"` pairs in the request body: -[source,js] +[source,console] -------------------------------------------------- PUT /customer/_doc/1 { "name": "John Doe" } -------------------------------------------------- -// CONSOLE This request automatically creates the `customer` index if it doesn't already exist, adds a new document that has an ID of `1`, and stores and @@ -252,11 +250,10 @@ operation was that version 1 of the document was created: The new document is available immediately from any node in the cluster. You can retrieve it with a GET request that specifies its document ID: -[source,js] +[source,console] -------------------------------------------------- GET /customer/_doc/1 -------------------------------------------------- -// CONSOLE // TEST[continued] The response indicates that a document with the specified ID was found @@ -327,11 +324,10 @@ curl "localhost:9200/_cat/indices?v" This replicates the above in a document-testing friendly way but isn't visible in the docs: + -[source,js] +[source,console] -------------------------------------------------- GET /_cat/indices?v -------------------------------------------------- -// CONSOLE // TEST[setup:bank] //// + @@ -357,7 +353,7 @@ want to search in the request URI. 
For example, the following request retrieves all documents in the `bank` index sorted by account number: -[source,js] +[source,console] -------------------------------------------------- GET /bank/_search { @@ -367,7 +363,6 @@ GET /bank/_search ] } -------------------------------------------------- -// CONSOLE // TEST[continued] By default, the `hits` section of the response includes the first 10 documents @@ -429,7 +424,7 @@ the `from` and `size` parameters in your request. For example, the following request gets hits 10 through 19: -[source,js] +[source,console] -------------------------------------------------- GET /bank/_search { @@ -441,7 +436,6 @@ GET /bank/_search "size": 10 } -------------------------------------------------- -// CONSOLE // TEST[continued] Now that you've seen how to submit a basic search request, you can start to @@ -451,28 +445,26 @@ To search for specific terms within a field, you can use a `match` query. For example, the following request searches the `address` field to find customers whose addresses contain `mill` or `lane`: -[source,js] +[source,console] -------------------------------------------------- GET /bank/_search { "query": { "match": { "address": "mill lane" } } } -------------------------------------------------- -// CONSOLE // TEST[continued] To perform a phrase search rather than matching individual terms, you use `match_phrase` instead of `match`. For example, the following request only matches addresses that contain the phrase `mill lane`: -[source,js] +[source,console] -------------------------------------------------- GET /bank/_search { "query": { "match_phrase": { "address": "mill lane" } } } -------------------------------------------------- -// CONSOLE // TEST[continued] To construct more complex queries, you can use a `bool` query to combine @@ -483,7 +475,7 @@ For example, the following request searches the `bank` index for accounts that belong to customers who are 40 years old, but excludes anyone who lives in Idaho (ID): -[source,js] +[source,console] -------------------------------------------------- GET /bank/_search { @@ -499,7 +491,6 @@ GET /bank/_search } } -------------------------------------------------- -// CONSOLE // TEST[continued] Each `must`, `should`, and `must_not` element in a Boolean query is referred @@ -516,7 +507,7 @@ include or exclude documents based on structured data. For example, the following request uses a range filter to limit the results to accounts with a balance between $20,000 and $30,000 (inclusive). -[source,js] +[source,console] -------------------------------------------------- GET /bank/_search { @@ -535,7 +526,6 @@ GET /bank/_search } } -------------------------------------------------- -// CONSOLE // TEST[continued] [[getting-started-aggregations]] @@ -551,7 +541,7 @@ For example, the following request uses a `terms` aggregation to group all of the accounts in the `bank` index by state, and returns the ten states with the most accounts in descending order: -[source,js] +[source,console] -------------------------------------------------- GET /bank/_search { @@ -565,7 +555,6 @@ GET /bank/_search } } -------------------------------------------------- -// CONSOLE // TEST[continued] The `buckets` in the response are the values of the `state` field. The @@ -639,7 +628,7 @@ example, the following request nests an `avg` aggregation within the previous `group_by_state` aggregation to calculate the average account balances for each state. 
-[source,js] +[source,console] -------------------------------------------------- GET /bank/_search { @@ -660,13 +649,12 @@ GET /bank/_search } } -------------------------------------------------- -// CONSOLE // TEST[continued] Instead of sorting the results by count, you could sort using the result of the nested aggregation by specifying the order within the `terms` aggregation: -[source,js] +[source,console] -------------------------------------------------- GET /bank/_search { @@ -690,7 +678,6 @@ GET /bank/_search } } -------------------------------------------------- -// CONSOLE // TEST[continued] In addition to basic bucketing and metrics aggregations like these, {es} diff --git a/docs/reference/glossary.asciidoc b/docs/reference/glossary.asciidoc index e13b5812f7b..63d0a43a8bf 100644 --- a/docs/reference/glossary.asciidoc +++ b/docs/reference/glossary.asciidoc @@ -100,11 +100,15 @@ An index is a logical namespace which maps to one or more + -- // tag::index-alias-def[] +// tag::index-alias-desc[] An index alias is a secondary name used to refer to one or more existing indices. Most {es} APIs accept an index alias in place of an index name. +// end::index-alias-desc[] + +See {ref}/indices-add-alias.html[Add index alias]. // end::index-alias-def[] See <>. diff --git a/docs/reference/graph/explore.asciidoc b/docs/reference/graph/explore.asciidoc index 5eb174efdb7..8d3c244f99e 100644 --- a/docs/reference/graph/explore.asciidoc +++ b/docs/reference/graph/explore.asciidoc @@ -181,7 +181,7 @@ a maximum number of documents per value for that field. For example: An initial search typically begins with a query to identify strongly related terms. -[source,js] +[source,console] -------------------------------------------------- POST clicklogs/_graph/explore { @@ -204,7 +204,7 @@ POST clicklogs/_graph/explore } } -------------------------------------------------- -// CONSOLE + <1> Seed the exploration with a query. This example is searching clicklogs for people who searched for the term "midi". <2> Identify the vertices to include in the graph. This example is looking for @@ -286,7 +286,7 @@ every document could be of interest, see the {kibana-ref}/graph-troubleshooting.html[Troubleshooting] guide. -[source,js] +[source,console] -------------------------------------------------- POST clicklogs/_graph/explore { @@ -337,7 +337,7 @@ POST clicklogs/_graph/explore } } -------------------------------------------------- -// CONSOLE + <1> Disable `use_significance` to include all associated terms, not just the ones that are significantly associated with the query. <2> Increase the sample size to consider a larger set of documents on @@ -373,7 +373,7 @@ the following request starts with the product `1854873` and spiders out to find additional search terms associated with that product. The terms "midi", "midi keyboard", and "synth" are excluded from the results. -[source,js] +[source,console] -------------------------------------------------- POST clicklogs/_graph/explore { @@ -397,7 +397,7 @@ POST clicklogs/_graph/explore } } -------------------------------------------------- -// CONSOLE + <1> The vertices you want to start from are specified as an array of terms in an `include` clause. 
<2> The `exclude` clause prevents terms you already know about from being diff --git a/docs/reference/high-availability.asciidoc b/docs/reference/high-availability.asciidoc new file mode 100644 index 00000000000..03469a0f526 --- /dev/null +++ b/docs/reference/high-availability.asciidoc @@ -0,0 +1,33 @@ +[[high-availability]] += Set up a cluster for high availability + +[partintro] +-- +As with any software that stores data, +it is important to routinely back up your data. +{es}'s <> provide high availability +during runtime; +they enable you to tolerate sporadic node loss +without an interruption of service. + +However, replica shards do not protect an {es} cluster +from catastrophic failure. +You need a backup of your cluster— +a copy in case something goes wrong. + + +{es} offers two features to support high availability for a cluster: + +* <>, +which you can use to back up individual indices or entire clusters. +You can automatically store these backups in a repository on a shared filesystem. + +* <>, +which you can use to copy indices in remote clusters to a local cluster. +You can use {ccr} to recover from the failure of a primary cluster +or serve data locally based on geo-proximity. +-- + +include::high-availability/backup-cluster.asciidoc[] + +include::ccr/index.asciidoc[] diff --git a/docs/reference/administering/backup-and-restore-security-config.asciidoc b/docs/reference/high-availability/backup-and-restore-security-config.asciidoc similarity index 100% rename from docs/reference/administering/backup-and-restore-security-config.asciidoc rename to docs/reference/high-availability/backup-and-restore-security-config.asciidoc diff --git a/docs/reference/administering/backup-cluster-config.asciidoc b/docs/reference/high-availability/backup-cluster-config.asciidoc similarity index 100% rename from docs/reference/administering/backup-cluster-config.asciidoc rename to docs/reference/high-availability/backup-cluster-config.asciidoc diff --git a/docs/reference/administering/backup-cluster-data.asciidoc b/docs/reference/high-availability/backup-cluster-data.asciidoc similarity index 73% rename from docs/reference/administering/backup-cluster-data.asciidoc rename to docs/reference/high-availability/backup-cluster-data.asciidoc index 063018337d6..ed0c732cdb4 100644 --- a/docs/reference/administering/backup-cluster-data.asciidoc +++ b/docs/reference/high-availability/backup-cluster-data.asciidoc @@ -4,14 +4,6 @@ Back up the data ++++ -As with any software that stores data, it is important to routinely back up your -data. {es} replicas provide high availability during runtime; they enable you to -tolerate sporadic node loss without an interruption of service. - -Replicas do not provide protection from catastrophic failure, however. For that, -you need a real backup of your cluster—a complete copy in case something goes -wrong. - To back up your cluster's data, you can use the <>. 
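For illustration only, the snapshot workflow referenced above could look like the following minimal sketch. The repository name `my_backup_repository` and the `location` value are placeholder assumptions, and a shared filesystem repository additionally requires the chosen path to be listed in the `path.repo` setting on every node:

[source,console]
--------------------------------------------------
PUT /_snapshot/my_backup_repository <1>
{
  "type": "fs",
  "settings": {
    "location": "/mount/backups/my_backup_repository" <2>
  }
}

PUT /_snapshot/my_backup_repository/snapshot_1?wait_for_completion=true <3>
--------------------------------------------------
// TEST[skip:illustrative sketch only, the repository location is a placeholder]

<1> Registers a shared filesystem repository; the repository name is a placeholder.
<2> Placeholder path; it must be allowed by `path.repo` in `elasticsearch.yml`.
<3> Takes a snapshot named `snapshot_1` in that repository and waits for it to complete.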
include::{es-repo-dir}/modules/snapshots.asciidoc[tag=snapshot-intro] diff --git a/docs/reference/administering/backup-cluster.asciidoc b/docs/reference/high-availability/backup-cluster.asciidoc similarity index 100% rename from docs/reference/administering/backup-cluster.asciidoc rename to docs/reference/high-availability/backup-cluster.asciidoc diff --git a/docs/reference/administering/restore-cluster-data.asciidoc b/docs/reference/high-availability/restore-cluster-data.asciidoc similarity index 100% rename from docs/reference/administering/restore-cluster-data.asciidoc rename to docs/reference/high-availability/restore-cluster-data.asciidoc diff --git a/docs/reference/how-to/disk-usage.asciidoc b/docs/reference/how-to/disk-usage.asciidoc index a475122b99e..be03c304e36 100644 --- a/docs/reference/how-to/disk-usage.asciidoc +++ b/docs/reference/how-to/disk-usage.asciidoc @@ -10,7 +10,7 @@ field called `foo` that you need to run histograms on but that you never need to filter on, you can safely disable indexing on this field in your <>: -[source,js] +[source,console] -------------------------------------------------- PUT index { @@ -24,14 +24,13 @@ PUT index } } -------------------------------------------------- -// CONSOLE <> fields store normalization factors in the index in order to be able to score documents. If you only need matching capabilities on a `text` field but do not care about the produced scores, you can configure Elasticsearch to not write norms to the index: -[source,js] +[source,console] -------------------------------------------------- PUT index { @@ -45,14 +44,13 @@ PUT index } } -------------------------------------------------- -// CONSOLE <> fields also store frequencies and positions in the index by default. Frequencies are used to compute scores and positions are used to run phrase queries. If you do not need to run phrase queries, you can tell Elasticsearch to not index positions: -[source,js] +[source,console] -------------------------------------------------- PUT index { @@ -66,14 +64,13 @@ PUT index } } -------------------------------------------------- -// CONSOLE Furthermore if you do not care about scoring either, you can configure Elasticsearch to just index matching documents for every term. You will still be able to search on this field, but phrase queries will raise errors and scoring will assume that terms appear only once in every document. -[source,js] +[source,console] -------------------------------------------------- PUT index { @@ -88,7 +85,6 @@ PUT index } } -------------------------------------------------- -// CONSOLE [float] [[default-dynamic-string-mapping]] @@ -106,7 +102,7 @@ or `keyword`. 
For instance, here is a template that can be used in order to only map string fields as `keyword`: -[source,js] +[source,console] -------------------------------------------------- PUT index { @@ -124,7 +120,6 @@ PUT index } } -------------------------------------------------- -// CONSOLE [float] === Watch your shard size diff --git a/docs/reference/how-to/recipes/scoring.asciidoc b/docs/reference/how-to/recipes/scoring.asciidoc index 90bff309857..2ccb7a3319a 100644 --- a/docs/reference/how-to/recipes/scoring.asciidoc +++ b/docs/reference/how-to/recipes/scoring.asciidoc @@ -100,7 +100,7 @@ look like this: ////////////////////////// -[source,js] +[source,console] -------------------------------------------------- PUT index { @@ -116,12 +116,10 @@ PUT index } } -------------------------------------------------- -// CONSOLE -// TEST ////////////////////////// -[source,js] +[source,console] -------------------------------------------------- GET index/_search { @@ -137,8 +135,8 @@ GET index/_search } } -------------------------------------------------- -// CONSOLE //TEST[continued] + <1> `pagerank` must be mapped as a <> while with the <> it would @@ -146,7 +144,7 @@ look like below: ////////////////////////// -[source,js] +[source,console] -------------------------------------------------- PUT index { @@ -162,12 +160,11 @@ PUT index } } -------------------------------------------------- -// CONSOLE // TEST ////////////////////////// -[source,js] +[source,console] -------------------------------------------------- GET _search { @@ -188,7 +185,7 @@ GET _search } } -------------------------------------------------- -// CONSOLE + <1> `pagerank` must be mapped as a <> field While both options would return similar scores, there are trade-offs: diff --git a/docs/reference/how-to/recipes/stemming.asciidoc b/docs/reference/how-to/recipes/stemming.asciidoc index f68d8c7ff87..462998c82b3 100644 --- a/docs/reference/how-to/recipes/stemming.asciidoc +++ b/docs/reference/how-to/recipes/stemming.asciidoc @@ -7,7 +7,7 @@ what if a user wants to search for `skiing` specifically? The typical way to do this would be to use a <> in order to have the same content indexed in two different ways: -[source,js] +[source,console] -------------------------------------------------- PUT index { @@ -51,11 +51,10 @@ PUT index/_doc/2 POST index/_refresh -------------------------------------------------- -// CONSOLE With such a setup, searching for `ski` on `body` would return both documents: -[source,js] +[source,console] -------------------------------------------------- GET index/_search { @@ -67,7 +66,6 @@ GET index/_search } } -------------------------------------------------- -// CONSOLE // TEST[continued] [source,console-result] @@ -116,7 +114,7 @@ On the other hand, searching for `ski` on `body.exact` would only return document `1` since the analysis chain of `body.exact` does not perform stemming. -[source,js] +[source,console] -------------------------------------------------- GET index/_search { @@ -128,7 +126,6 @@ GET index/_search } } -------------------------------------------------- -// CONSOLE // TEST[continued] [source,console-result] @@ -175,7 +172,7 @@ that solves this exact problem: `quote_field_suffix`. 
This tells Elasticsearch that the words that appear in between quotes are to be redirected to a different field, see below: -[source,js] +[source,console] -------------------------------------------------- GET index/_search { @@ -188,7 +185,6 @@ GET index/_search } } -------------------------------------------------- -// CONSOLE // TEST[continued] [source,console-result] diff --git a/docs/reference/how-to/search-speed.asciidoc b/docs/reference/how-to/search-speed.asciidoc index 9c445a148db..0f3e112c1dc 100644 --- a/docs/reference/how-to/search-speed.asciidoc +++ b/docs/reference/how-to/search-speed.asciidoc @@ -48,7 +48,7 @@ of an index containing movies that optimizes queries that search over both the name and the plot of the movie by indexing both values into the `name_and_plot` field. -[source,js] +[source,console] -------------------------------------------------- PUT movies { @@ -69,7 +69,6 @@ PUT movies } } -------------------------------------------------- -// CONSOLE [float] === Pre-index data @@ -83,7 +82,7 @@ aggregations. For instance, if documents look like: -[source,js] +[source,console] -------------------------------------------------- PUT index/_doc/1 { @@ -91,11 +90,10 @@ PUT index/_doc/1 "price": 13 } -------------------------------------------------- -// CONSOLE and search requests look like: -[source,js] +[source,console] -------------------------------------------------- GET index/_search { @@ -113,13 +111,12 @@ GET index/_search } } -------------------------------------------------- -// CONSOLE // TEST[continued] Then documents could be enriched by a `price_range` field at index time, which should be mapped as a <>: -[source,js] +[source,console] -------------------------------------------------- PUT index { @@ -139,12 +136,11 @@ PUT index/_doc/1 "price_range": "10-100" } -------------------------------------------------- -// CONSOLE And then search requests could aggregate this new field rather than running a `range` aggregation on the `price` field. -[source,js] +[source,console] -------------------------------------------------- GET index/_search { @@ -157,7 +153,6 @@ GET index/_search } } -------------------------------------------------- -// CONSOLE // TEST[continued] [float] @@ -188,7 +183,7 @@ benefit of making better use of the query cache. For instance the below query: -[source,js] +[source,console] -------------------------------------------------- PUT index/_doc/1 { @@ -211,11 +206,10 @@ GET index/_search } } -------------------------------------------------- -// CONSOLE could be replaced with the following query: -[source,js] +[source,console] -------------------------------------------------- GET index/_search { @@ -233,7 +227,6 @@ GET index/_search } } -------------------------------------------------- -// CONSOLE // TEST[continued] In that case we rounded to the minute, so if the current time is `16:31:29`, @@ -249,7 +242,7 @@ NOTE: It might be tempting to split ranges into a large cacheable part and smaller not cacheable parts in order to be able to leverage the query cache, as shown below: -[source,js] +[source,console] -------------------------------------------------- GET index/_search { @@ -289,7 +282,6 @@ GET index/_search } } -------------------------------------------------- -// CONSOLE // TEST[continued] However such practice might make the query run slower in some cases since the @@ -322,7 +314,7 @@ and which fields won't. 
You can tell Elasticsearch to load global ordinals eagerly when starting or refreshing a shard by configuring mappings as described below: -[source,js] +[source,console] -------------------------------------------------- PUT index { @@ -336,7 +328,6 @@ PUT index } } -------------------------------------------------- -// CONSOLE [float] === Warm up the filesystem cache diff --git a/docs/reference/ilm/apis/delete-lifecycle.asciidoc b/docs/reference/ilm/apis/delete-lifecycle.asciidoc index db0faebf311..13d60661eba 100644 --- a/docs/reference/ilm/apis/delete-lifecycle.asciidoc +++ b/docs/reference/ilm/apis/delete-lifecycle.asciidoc @@ -38,7 +38,7 @@ The following example deletes `my_policy`: ////////////////////////// -[source,js] +[source,console] -------------------------------------------------- PUT _ilm/policy/my_policy { @@ -62,16 +62,14 @@ PUT _ilm/policy/my_policy } } -------------------------------------------------- -// CONSOLE // TEST ////////////////////////// -[source,js] +[source,console] -------------------------------------------------- DELETE _ilm/policy/my_policy -------------------------------------------------- -// CONSOLE // TEST[continued] When the policy is successfully deleted, you receive the following result: diff --git a/docs/reference/ilm/apis/explain.asciidoc b/docs/reference/ilm/apis/explain.asciidoc index 5202d384364..78bb76e46cf 100644 --- a/docs/reference/ilm/apis/explain.asciidoc +++ b/docs/reference/ilm/apis/explain.asciidoc @@ -49,7 +49,7 @@ The following example retrieves the lifecycle state of `my_index`: ////////////////////////// -[source,js] +[source,console] -------------------------------------------------- PUT _ilm/policy/my_policy { @@ -83,22 +83,20 @@ PUT my_index GET /_cluster/health?wait_for_status=green&timeout=10s -------------------------------------------------- -// CONSOLE // TEST ////////////////////////// -[source,js] +[source,console] -------------------------------------------------- GET my_index/_ilm/explain -------------------------------------------------- -// CONSOLE // TEST[continued] When management of the index is first taken over by ILM, `explain` shows that the index is managed and in the `new` phase: -[source,js] +[source,console-result] -------------------------------------------------- { "indices": { @@ -118,8 +116,8 @@ that the index is managed and in the `new` phase: } } -------------------------------------------------- -// CONSOLE // TESTRESPONSE[skip:no way to know if we will get this response immediately] + <1> Shows if the index is being managed by ILM. If the index is not managed by ILM the other fields will not be shown <2> The name of the policy which ILM is using for this index @@ -134,7 +132,7 @@ Once the policy is running on the index, the response includes a Changes to the underlying policy will not affect this index until the current phase completes. -[source,js] +[source,console-result] -------------------------------------------------- { "indices": { @@ -172,8 +170,8 @@ phase completes. } } -------------------------------------------------- -// CONSOLE // TESTRESPONSE[skip:not possible to get the cluster into this state in a docs test] + <1> The JSON phase definition loaded from the specified policy when the index entered this phase <2> The version of the policy that was loaded @@ -183,7 +181,7 @@ entered this phase If {ilm-init} is waiting for a step to complete, the response includes status information for the step that's being performed on the index. 
-[source,js] +[source,console-result] -------------------------------------------------- { "indices": { @@ -235,8 +233,8 @@ information for the step that's being performed on the index. } } -------------------------------------------------- -// CONSOLE // TESTRESPONSE[skip:not possible to get the cluster into this state in a docs test] + <1> Status of the step that's in progress. If the index is in the ERROR step, something went wrong while executing a @@ -244,7 +242,7 @@ step in the policy and you will need to take action for the index to proceed to the next step. To help you diagnose the problem, the explain response shows the step that failed and the step info provides information about the error. -[source,js] +[source,console-result] -------------------------------------------------- { "indices": { @@ -289,7 +287,7 @@ the step that failed and the step info provides information about the error. } } -------------------------------------------------- -// CONSOLE // TESTRESPONSE[skip:not possible to get the cluster into this state in a docs test] + <1> The step that caused the error <2> What went wrong diff --git a/docs/reference/ilm/apis/get-lifecycle.asciidoc b/docs/reference/ilm/apis/get-lifecycle.asciidoc index 902ea39e454..2fbdd03f43e 100644 --- a/docs/reference/ilm/apis/get-lifecycle.asciidoc +++ b/docs/reference/ilm/apis/get-lifecycle.asciidoc @@ -38,7 +38,7 @@ The following example retrieves `my_policy`: ////////////////////////// -[source,js] +[source,console] -------------------------------------------------- PUT _ilm/policy/my_policy { @@ -62,22 +62,19 @@ PUT _ilm/policy/my_policy } } -------------------------------------------------- -// CONSOLE -// TEST ////////////////////////// -[source,js] +[source,console] -------------------------------------------------- GET _ilm/policy/my_policy -------------------------------------------------- -// CONSOLE // TEST[continued] If the request succeeds, the body of the response contains the policy definition: -[source,js] +[source,console-result] -------------------------------------------------- { "my_policy": { @@ -104,7 +101,7 @@ If the request succeeds, the body of the response contains the policy definition } } -------------------------------------------------- -// CONSOLE // TESTRESPONSE[s/"modified_date": 82392349/"modified_date": $body.my_policy.modified_date/] + <1> The policy version is incremented whenever the policy is updated <2> When this policy was last modified diff --git a/docs/reference/ilm/apis/get-status.asciidoc b/docs/reference/ilm/apis/get-status.asciidoc index 2f8a1ed43af..ce983e8d0ee 100644 --- a/docs/reference/ilm/apis/get-status.asciidoc +++ b/docs/reference/ilm/apis/get-status.asciidoc @@ -34,11 +34,10 @@ For more information, see {stack-ov}/security-privileges.html[Security Privilege The following example gets the {ilm-init} plugin status. 
-[source,js] +[source,console] -------------------------------------------------- GET _ilm/status -------------------------------------------------- -// CONSOLE If the request succeeds, the body of the response shows the operation mode: diff --git a/docs/reference/ilm/apis/move-to-step.asciidoc b/docs/reference/ilm/apis/move-to-step.asciidoc index 7d132290cce..1f49d501462 100644 --- a/docs/reference/ilm/apis/move-to-step.asciidoc +++ b/docs/reference/ilm/apis/move-to-step.asciidoc @@ -48,7 +48,7 @@ The following example moves `my_index` from the initial step to the ////////////////////////// -[source,js] +[source,console] -------------------------------------------------- PUT _ilm/policy/my_policy { @@ -79,12 +79,10 @@ PUT my_index } } -------------------------------------------------- -// CONSOLE -// TEST ////////////////////////// -[source,js] +[source,console] -------------------------------------------------- POST _ilm/move/my_index { @@ -100,7 +98,6 @@ POST _ilm/move/my_index } } -------------------------------------------------- -// CONSOLE // TEST[continued] <1> The step that the index is expected to be in <2> The step that you want to execute diff --git a/docs/reference/ilm/apis/put-lifecycle.asciidoc b/docs/reference/ilm/apis/put-lifecycle.asciidoc index 4dde6b7c05a..d940a2a28b0 100644 --- a/docs/reference/ilm/apis/put-lifecycle.asciidoc +++ b/docs/reference/ilm/apis/put-lifecycle.asciidoc @@ -42,7 +42,7 @@ For more information, see {stack-ov}/security-privileges.html[Security Privilege The following example creates a new policy named `my_policy`: -[source,js] +[source,console] -------------------------------------------------- PUT _ilm/policy/my_policy { @@ -66,8 +66,6 @@ PUT _ilm/policy/my_policy } } -------------------------------------------------- -// CONSOLE -// TEST If the request succeeds, you receive the following result: diff --git a/docs/reference/ilm/apis/remove-policy-from-index.asciidoc b/docs/reference/ilm/apis/remove-policy-from-index.asciidoc index 6b97d014d98..09ae762d04f 100644 --- a/docs/reference/ilm/apis/remove-policy-from-index.asciidoc +++ b/docs/reference/ilm/apis/remove-policy-from-index.asciidoc @@ -38,7 +38,7 @@ The following example removes the assigned policy from `my_index`. ////////////////////////// -[source,js] +[source,console] -------------------------------------------------- PUT _ilm/policy/my_policy { @@ -69,16 +69,13 @@ PUT my_index } } -------------------------------------------------- -// CONSOLE -// TEST ////////////////////////// -[source,js] +[source,console] -------------------------------------------------- POST my_index/_ilm/remove -------------------------------------------------- -// CONSOLE // TEST[continued] If the request succeeds, you receive the following result: diff --git a/docs/reference/ilm/apis/slm-api.asciidoc b/docs/reference/ilm/apis/slm-api.asciidoc index 0466924e3de..59c1601ab9b 100644 --- a/docs/reference/ilm/apis/slm-api.asciidoc +++ b/docs/reference/ilm/apis/slm-api.asciidoc @@ -14,7 +14,9 @@ policies, a way to retrieve policies, and a way to delete unwanted policies, as well as a separate API for immediately invoking a snapshot based on a policy. Since SLM falls under the same category as ILM, it is stopped and started by -using the <> ILM APIs. +using the <> ILM APIs. It is, however, managed +by a different enable setting. To disable SLM's functionality, set the cluster +setting `xpack.slm.enabled` to `false` in elasticsearch.yml. 
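For example, a minimal `elasticsearch.yml` fragment that disables SLM's functionality using the setting named above might look like this sketch (only the single setting is shown; the rest of the file is omitted):

[source,yaml]
--------------------------------------------------
# Illustrative sketch only: turns off the snapshot lifecycle management APIs on this node.
# ILM itself is controlled separately by its own enable setting.
xpack.slm.enabled: false
--------------------------------------------------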
[[slm-api-put]] === Put Snapshot Lifecycle Policy API @@ -48,7 +50,7 @@ latest version of a policy. For more information, see The following creates a snapshot lifecycle policy with an id of `daily-snapshots`: -[source,js] +[source,console] -------------------------------------------------- PUT /_slm/policy/daily-snapshots { @@ -59,11 +61,12 @@ PUT /_slm/policy/daily-snapshots "indices": ["data-*", "important"], <5> "ignore_unavailable": false, "include_global_state": false - } + }, + "retention": {} } -------------------------------------------------- -// CONSOLE // TEST[setup:setup-repository] + <1> When the snapshot should be taken, in this case, 1:30am daily <2> The name each snapshot should be given <3> Which repository to take the snapshot in @@ -112,11 +115,10 @@ latest successful and failed invocation that the automatic snapshots have taken. To retrieve a policy, perform a `GET` with the policy's id -[source,js] +[source,console] -------------------------------------------------- GET /_slm/policy/daily-snapshots?human -------------------------------------------------- -// CONSOLE // TEST[continued] The output looks similar to the following: @@ -136,7 +138,14 @@ The output looks similar to the following: "indices": ["data-*", "important"], "ignore_unavailable": false, "include_global_state": false - } + }, + "retention": {} + }, + "stats": { + "snapshots_taken": 0, + "snapshots_failed": 0, + "snapshots_deleted": 0, + "snapshot_deletion_failures": 0 }, "next_execution": "2019-04-24T01:30:00.000Z", <3> "next_execution_millis": 1556048160000 @@ -150,11 +159,10 @@ The output looks similar to the following: Or, to retrieve all policies: -[source,js] +[source,console] -------------------------------------------------- GET /_slm/policy -------------------------------------------------- -// CONSOLE // TEST[continued] [[slm-api-execute]] @@ -174,11 +182,10 @@ waiting for a policy's scheduled invocation. 
To take an immediate snapshot using a policy, use the following -[source,js] +[source,console] -------------------------------------------------- PUT /_slm/policy/daily-snapshots/_execute -------------------------------------------------- -// CONSOLE // TEST[skip:we can't easily handle snapshots from docs tests] This API will immediately return with the generated snapshot name @@ -197,11 +204,10 @@ The snapshot will be taken in the background, you can use the Once a snapshot has been kicked off, you can see the latest successful or failed snapshot using the get snapshot lifecycle policy API: -[source,js] +[source,console] -------------------------------------------------- GET /_slm/policy/daily-snapshots?human -------------------------------------------------- -// CONSOLE // TEST[skip:we already tested get policy above, the last_failure may not be present though] Which, in this case shows an error because the index did not exist: @@ -221,8 +227,15 @@ Which, in this case shows an error because the index did not exist: "indices": ["data-*", "important"], "ignore_unavailable": false, "include_global_state": false - } + }, + "retention": {} }, + "stats": { + "snapshots_taken": 0, + "snapshots_failed": 1, + "snapshots_deleted": 0, + "snapshot_deletion_failures": 0 + } "last_failure": { <1> "snapshot_name": "daily-snap-2019.04.02-lohisb5ith2n8hxacaq3mw", "time_string": "2019-04-02T01:30:00.000Z", @@ -244,7 +257,7 @@ In this case, it failed due to the "important" index not existing and Updating the policy to change the `ignore_unavailable` setting is done using the same put snapshot lifecycle policy API: -[source,js] +[source,console] -------------------------------------------------- PUT /_slm/policy/daily-snapshots { @@ -258,16 +271,14 @@ PUT /_slm/policy/daily-snapshots } } -------------------------------------------------- -// CONSOLE // TEST[continued] Another snapshot can immediately be executed to ensure the new policy works: -[source,js] +[source,console] -------------------------------------------------- PUT /_slm/policy/daily-snapshots/_execute -------------------------------------------------- -// CONSOLE // TEST[skip:we can't handle snapshots in docs tests] [source,console-result] @@ -281,11 +292,10 @@ PUT /_slm/policy/daily-snapshots/_execute Now retriving the policy shows that the policy has successfully been executed: -[source,js] +[source,console] -------------------------------------------------- GET /_slm/policy/daily-snapshots?human -------------------------------------------------- -// CONSOLE // TEST[skip:we already tested this above and the output may not be available yet] Which now includes the successful snapshot information: @@ -305,7 +315,14 @@ Which now includes the successful snapshot information: "indices": ["data-*", "important"], "ignore_unavailable": true, "include_global_state": false - } + }, + "retention": {} + }, + "stats": { + "snapshots_taken": 1, + "snapshots_failed": 1, + "snapshots_deleted": 0, + "snapshot_deletion_failures": 0 }, "last_success": { <2> "snapshot_name": "daily-snap-2019.04.24-tmtnyjtrsxkhbrrdcgg18a", @@ -330,6 +347,46 @@ Which now includes the successful snapshot information: It is a good idea to test policies using the execute API to ensure they work. +[[slm-get-stats]] +=== Get Snapshot Lifecycle Stats API + +SLM stores statistics on a global and per-policy level about actions taken. 
These stats can be +retrieved by using the following API: + +==== Example + +[source,console] +-------------------------------------------------- +GET /_slm/stats +-------------------------------------------------- +// TEST[continued] + +Which returns a response similar to: + +[source,js] +-------------------------------------------------- +{ + "retention_runs": 13, + "retention_failed": 0, + "retention_timed_out": 0, + "retention_deletion_time": "1.4s", + "retention_deletion_time_millis": 1404, + "policy_metrics": { + "daily-snapshots": { + "snapshots_taken": 1, + "snapshots_failed": 1, + "snapshots_deleted": 0, + "snapshot_deletion_failures": 0 + } + }, + "total_snapshots_taken": 1, + "total_snapshots_failed": 1, + "total_snapshots_deleted": 0, + "total_snapshot_deletion_failures": 0 +} +-------------------------------------------------- +// TESTRESPONSE[s/runs": 13/runs": $body.retention_runs/ s/_failed": 0/_failed": $body.retention_failed/ s/_timed_out": 0/_timed_out": $body.retention_timed_out/ s/"1.4s"/$body.retention_deletion_time/ s/1404/$body.retention_deletion_time_millis/] + [[slm-api-delete]] === Delete Snapshot Lifecycle Policy API @@ -344,9 +401,8 @@ any currently ongoing snapshots or remove any previously taken snapshots. ==== Example -[source,js] +[source,console] -------------------------------------------------- DELETE /_slm/policy/daily-snapshots -------------------------------------------------- -// CONSOLE // TEST[continued] diff --git a/docs/reference/ilm/apis/start.asciidoc b/docs/reference/ilm/apis/start.asciidoc index b8f1ba656c4..f80bd244956 100644 --- a/docs/reference/ilm/apis/start.asciidoc +++ b/docs/reference/ilm/apis/start.asciidoc @@ -35,7 +35,7 @@ The following example starts the ILM plugin. ////////////////////////// -[source,js] +[source,console] -------------------------------------------------- PUT _ilm/policy/my_policy { @@ -63,16 +63,13 @@ PUT my_index POST _ilm/stop -------------------------------------------------- -// CONSOLE -// TEST ////////////////////////// -[source,js] +[source,console] -------------------------------------------------- POST _ilm/start -------------------------------------------------- -// CONSOLE // TEST[continued] If the request succeeds, you receive the following result: diff --git a/docs/reference/ilm/apis/stop.asciidoc b/docs/reference/ilm/apis/stop.asciidoc index aaf9f74c933..0d16140054f 100644 --- a/docs/reference/ilm/apis/stop.asciidoc +++ b/docs/reference/ilm/apis/stop.asciidoc @@ -40,7 +40,7 @@ The following example stops the ILM plugin. 
////////////////////////// -[source,js] +[source,console] -------------------------------------------------- PUT _ilm/policy/my_policy { @@ -66,16 +66,14 @@ PUT _ilm/policy/my_policy PUT my_index -------------------------------------------------- -// CONSOLE // TEST ////////////////////////// -[source,js] +[source,console] -------------------------------------------------- POST _ilm/stop -------------------------------------------------- -// CONSOLE // TEST[continued] If the request does not encounter errors, you receive the following result: @@ -89,11 +87,10 @@ If the request does not encounter errors, you receive the following result: ////////////////////////// -[source,js] +[source,console] -------------------------------------------------- POST _ilm/start -------------------------------------------------- -// CONSOLE // TEST[continued] ////////////////////////// diff --git a/docs/reference/ilm/error-handling.asciidoc b/docs/reference/ilm/error-handling.asciidoc index 831376d68d2..d3c6c9a18d1 100644 --- a/docs/reference/ilm/error-handling.asciidoc +++ b/docs/reference/ilm/error-handling.asciidoc @@ -12,7 +12,7 @@ any issues with the policy, index, or cluster. An example will be helpful in illustrating this, imagine the following policy has been created by a user: -[source,js] +[source,console] -------------------------------------------------- PUT _ilm/policy/shrink-the-index { @@ -30,7 +30,6 @@ PUT _ilm/policy/shrink-the-index } } -------------------------------------------------- -// CONSOLE // TEST This policy waits until the index is at least 5 days old, and then shrinks @@ -39,7 +38,7 @@ the index to 4 shards. Now imagine that a user creates a new index "myindex" with two primary shards, telling it to use the policy they have created: -[source,js] +[source,console] -------------------------------------------------- PUT /myindex { @@ -49,7 +48,6 @@ PUT /myindex } } -------------------------------------------------- -// CONSOLE // TEST[continued] After five days have passed, ILM will attempt to shrink this index from 2 @@ -58,16 +56,15 @@ number of shards. When this occurs, ILM will move this index to the "error" step. Once an index is in this step, information about the reason for the error can be retrieved from the <>: -[source,js] +[source,console] -------------------------------------------------- GET /myindex/_ilm/explain -------------------------------------------------- -// CONSOLE // TEST[continued] Which returns the following information: -[source,js] +[source,console-result] -------------------------------------------------- { "indices" : { @@ -105,8 +102,8 @@ Which returns the following information: } } -------------------------------------------------- -// CONSOLE // TESTRESPONSE[skip:no way to know if we will get this response immediately] + <1> this index is managed by ILM <2> the policy in question, in this case, "shrink-the-index" <3> the current age for the index @@ -123,7 +120,7 @@ the policy is using an incorrect number of shards. So rectifying that in the policy entails updating the existing policy to use one instead of four for the targeted number of shards. 
-[source,js] +[source,console] -------------------------------------------------- PUT _ilm/policy/shrink-the-index { @@ -141,7 +138,6 @@ PUT _ilm/policy/shrink-the-index } } -------------------------------------------------- -// CONSOLE // TEST[continued] [float] @@ -151,11 +147,10 @@ Once the underlying issue that caused an index to move to the error step has been corrected, index lifecycle management must be told to retry the step to see if it can progress further. This is accomplished by invoking the retry API -[source,js] +[source,console] -------------------------------------------------- POST /myindex/_ilm/retry -------------------------------------------------- -// CONSOLE // TEST[skip:we can't be sure the index is ready to be retried at this point] Once this has been issue, index lifecycle management will asynchronously pick up diff --git a/docs/reference/ilm/getting-started-ilm.asciidoc b/docs/reference/ilm/getting-started-ilm.asciidoc index f74e49fe112..f7ca3f4d216 100644 --- a/docs/reference/ilm/getting-started-ilm.asciidoc +++ b/docs/reference/ilm/getting-started-ilm.asciidoc @@ -25,7 +25,7 @@ a few that are needed for our example. For starters, we will use the policies are defined in JSON and include specific <>. -[source,js] +[source,console] ------------------------ PUT _ilm/policy/datastream_policy <1> { @@ -49,8 +49,7 @@ PUT _ilm/policy/datastream_policy <1> } } ------------------------ -// CONSOLE -// TEST + <1> call to the <> endpoint to create a new policy named "datastream_policy" <2> policy definition sub-object @@ -76,7 +75,7 @@ the new index created from Rollover, we will set the policy via index templates. -[source,js] +[source,console] ----------------------- PUT _template/datastream_template { @@ -89,8 +88,8 @@ PUT _template/datastream_template } } ----------------------- -// CONSOLE // TEST[continued] + <1> match all indices starting with "datastream-". These will include all newly created indices from actions like rollover <2> the name of the lifecycle policy managing the index @@ -109,7 +108,7 @@ The intention here is that the rollover alias is also defined on the index. To begin, we will want to bootstrap our first index to write to. -[source,js] +[source,console] ----------------------- PUT datastream-000001 { @@ -120,7 +119,6 @@ PUT datastream-000001 } } ----------------------- -// CONSOLE // TEST[continued] When creating our index, we have to consider a few important configurations @@ -153,18 +151,17 @@ things like which phase we're in and when we entered that phase. The API will also provide further info if errors occurred, or if we are blocked on certain checks within actions. -[source,js] +[source,console] -------------------------------------------------- GET datastream-*/_ilm/explain -------------------------------------------------- -// CONSOLE // TEST[continued] The above request will retrieve {ilm-init} execution information for all our managed indices. -[source,js] +[source,console-result] -------------------------------------------------- { "indices": { @@ -198,8 +195,8 @@ managed indices. 
} } -------------------------------------------------- -// CONSOLE // TESTRESPONSE[skip:no way to know if we will get this response immediately] + <1> this index is managed by ILM <2> the policy in question, in this case, "datastream_policy" <3> the current age of the index diff --git a/docs/reference/ilm/getting-started-slm.asciidoc b/docs/reference/ilm/getting-started-slm.asciidoc index baef2021f77..32a5c5ef4d8 100644 --- a/docs/reference/ilm/getting-started-slm.asciidoc +++ b/docs/reference/ilm/getting-started-slm.asciidoc @@ -19,7 +19,7 @@ allows taking snapshots even for indices the role may not have access to. An example of configuring an administrator role for SLM follows: -[source,js] +[source,console] ----------------------------------- POST /_security/role/slm-admin { @@ -32,13 +32,12 @@ POST /_security/role/slm-admin ] } ----------------------------------- -// CONSOLE // TEST[skip:security is not enabled here] Or, for a read-only role that can retrieve policies (but not update, execute, or delete them), as well as only view the history index: -[source,js] +[source,console] ----------------------------------- POST /_security/role/slm-read-only { @@ -51,7 +50,6 @@ POST /_security/role/slm-read-only ] } ----------------------------------- -// CONSOLE // TEST[skip:security is not enabled here] [float] @@ -64,7 +62,7 @@ stored. Repositories can use {plugins}/repository.html[many different backends], including cloud storage providers. You'll probably want to use one of these in production, but for this example we'll use a shared file system repository: -[source,js] +[source,console] ----------------------------------- PUT /_snapshot/my_repository { @@ -74,8 +72,6 @@ PUT /_snapshot/my_repository } } ----------------------------------- -// CONSOLE -// TEST [float] === Setting up a policy @@ -86,7 +82,7 @@ snapshots, what the snapshots should be named, and which indices should be included, among other things. We'll use the <> API to create the policy. -[source,js] +[source,console] -------------------------------------------------- PUT /_slm/policy/nightly-snapshots { @@ -95,10 +91,10 @@ PUT /_slm/policy/nightly-snapshots "repository": "my_repository", <3> "config": { <4> "indices": ["*"] <5> - } + }, + "retention": {} } -------------------------------------------------- -// CONSOLE // TEST[continued] <1> when the snapshot should be taken, using {xpack-ref}/trigger-schedule.html#schedule-cron[Cron syntax], in this @@ -134,11 +130,10 @@ Instead of waiting for our policy to run, let's tell SLM to take a snapshot as using the configuration from our policy right now instead of waiting for 1:30AM. -[source,js] +[source,console] -------------------------------------------------- PUT /_slm/policy/nightly-snapshots/_execute -------------------------------------------------- -// CONSOLE // TEST[skip:we can't easily handle snapshots from docs tests] This request will kick off a snapshot for our policy right now, regardless of @@ -147,11 +142,10 @@ a configuration change, upgrading, or for our purposes, making sure our policy is going to work successfully. The policy will continue to run on its configured schedule after this execution of the policy. -[source,js] +[source,console] -------------------------------------------------- GET /_slm/policy/nightly-snapshots?human -------------------------------------------------- -// CONSOLE // TEST[continued] This request will return a response that includes the policy, as well as @@ -171,7 +165,8 @@ next time the policy will be executed. 
"repository": "my_repository", "config": { "indices": ["*"], - } + }, + "retention": {} }, "last_success": { <1> "snapshot_name": "nightly-snap-2019.04.24-tmtnyjtrsxkhbrrdcgg18a", <2> diff --git a/docs/reference/ilm/ilm-with-existing-indices.asciidoc b/docs/reference/ilm/ilm-with-existing-indices.asciidoc index 60aff62b714..cff82df8388 100644 --- a/docs/reference/ilm/ilm-with-existing-indices.asciidoc +++ b/docs/reference/ilm/ilm-with-existing-indices.asciidoc @@ -19,7 +19,7 @@ log message and a timestamp. First, we need to create a template for these indices: -[source,js] +[source,console] ----------------------- PUT _template/mylogs_template { @@ -42,12 +42,10 @@ PUT _template/mylogs_template } } ----------------------- -// CONSOLE -// TEST And we'll ingest a few documents to create a few daily indices: -[source,js] +[source,console] ----------------------- POST mylogs-pre-ilm-2019.06.24/_doc { @@ -55,10 +53,9 @@ POST mylogs-pre-ilm-2019.06.24/_doc "message": "this is one log message" } ----------------------- -// CONSOLE // TEST[continued] -[source,js] +[source,console] ----------------------- POST mylogs-pre-ilm-2019.06.25/_doc { @@ -66,7 +63,6 @@ POST mylogs-pre-ilm-2019.06.25/_doc "message": "this is another log message" } ----------------------- -// CONSOLE // TEST[continued] Now that we have these indices, we'll look at a few different ways of migrating @@ -117,7 +113,8 @@ action. For example, if you created a policy for your new indices with each phase like so: -[source,js] + +[source,console] ----------------------- PUT _ilm/policy/mylogs_policy { @@ -154,14 +151,13 @@ PUT _ilm/policy/mylogs_policy } } ----------------------- -// CONSOLE // TEST[continued] You can create a policy for pre-existing indices by removing the `rollover` action, and in this case, the `hot` phase is now empty so we can remove that too: -[source,js] +[source,console] ----------------------- PUT _ilm/policy/mylogs_policy_existing { @@ -191,7 +187,6 @@ PUT _ilm/policy/mylogs_policy_existing } } ----------------------- -// CONSOLE // TEST[continued] Creating a separate policy for existing indices will also allow using different @@ -204,7 +199,7 @@ the index name when calling the <> to set the policy name, but be careful that you don't include any indices that you don't want to change the policy for: -[source,js] +[source,console] ----------------------- PUT mylogs-pre-ilm*/_settings <1> { @@ -215,7 +210,6 @@ PUT mylogs-pre-ilm*/_settings <1> } } ----------------------- -// CONSOLE // TEST[continued] <1> This pattern will match all indices with names that start with @@ -247,7 +241,7 @@ set up. For this section, we'll be using the same setup described in First, we'll set up a policy with rollover, and can include any additional phases required. 
For simplicity, we'll just use rollover: -[source,js] +[source,console] ----------------------- PUT _ilm/policy/sample_policy { @@ -265,13 +259,12 @@ PUT _ilm/policy/sample_policy } } ----------------------- -// CONSOLE // TEST[continued] And now we'll update the index template for our indices to include the relevant {ilm-init} settings: -[source,js] +[source,console] ----------------------- PUT _template/mylogs_template { @@ -300,8 +293,8 @@ PUT _template/mylogs_template } } ----------------------- -// CONSOLE // TEST[continued] + <1> The new index pattern has a prefix compared to the old one, this will make it easier to reindex later <2> The name of the policy we defined above @@ -310,7 +303,7 @@ PUT _template/mylogs_template And create the first index with the alias specified in the `rollover_alias` setting in the index template: -[source,js] +[source,console] ----------------------- PUT ilm-mylogs-000001 { @@ -321,7 +314,6 @@ PUT ilm-mylogs-000001 } } ----------------------- -// CONSOLE // TEST[continued] All new documents should be indexed via the `mylogs` alias at this point. Adding @@ -340,7 +332,7 @@ can grow very, very quickly. We'll need to set the poll interval to something shorter to ensure that the new indices don't grow too large while waiting for the rollover check: -[source,js] +[source,console] ----------------------- PUT _cluster/settings { @@ -349,8 +341,8 @@ PUT _cluster/settings } } ----------------------- -// CONSOLE // TEST[skip:don't want to overwrite this setting for other tests] + <1> This tells ILM to check for rollover conditions every minute We're now ready to reindex our data using the <>. If @@ -367,7 +359,7 @@ documents will retain their original IDs. One way to do this is to use a <> in the reindex call to append the original index name to the document ID. -[source,js] +[source,console] ----------------------- POST _reindex { @@ -381,8 +373,8 @@ POST _reindex } } ----------------------- -// CONSOLE // TEST[continued] + <1> This index pattern matches our existing indices. Using the prefix for the new indices makes using this index pattern much easier. <2> The alias set up above @@ -397,7 +389,7 @@ be queried using that alias as well. We should also be sure to set the {ilm-init} poll interval back to its default value, because keeping it set too low can cause unnecessary load on the current master node: -[source,js] +[source,console] ----------------------- PUT _cluster/settings { @@ -407,7 +399,6 @@ PUT _cluster/settings } ----------------------- -// CONSOLE // TEST[skip:don't want to overwrite this setting for other tests] All of the reindexed data should now be accessible via the alias set up above, diff --git a/docs/reference/ilm/policy-definitions.asciidoc b/docs/reference/ilm/policy-definitions.asciidoc index 8ca2b68ac36..02fbb980b08 100644 --- a/docs/reference/ilm/policy-definitions.asciidoc +++ b/docs/reference/ilm/policy-definitions.asciidoc @@ -30,7 +30,7 @@ duration format (see <>). `min_age` defaults to zero seconds `0s` for each phase if not specified. -[source,js] +[source,console] -------------------------------------------------- PUT _ilm/policy/my_policy { @@ -54,17 +54,18 @@ PUT _ilm/policy/my_policy } } -------------------------------------------------- -// CONSOLE The Above example configures a policy that moves the index into the warm phase after one day. Until then, the index is in a waiting state. After moving into the warm phase, it will wait until 30 days have elapsed before moving to the delete phase and deleting the index. 
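To see where a managed index currently sits in this lifecycle, you can query the ILM explain API at any time. A minimal check, assuming an index named `my_index` has already been created with `index.lifecycle.name` set to `my_policy`:

[source,console]
--------------------------------------------------
GET my_index/_ilm/explain
--------------------------------------------------

The response reports the current `phase`, `action`, and `step` for the index, along with its `age`, which makes it easy to confirm that the `min_age` thresholds above are behaving as expected.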
-`min_age` is usually the time elapsed from the time the index is created. If the -index is rolled over, then `min_age` is the time elapsed from the time the index -is rolled over. The intention here is to execute following phases and actions -relative to when data was written last to a rolled over index. +`min_age` is usually the time elapsed from the time the index is created, unless +the `index.lifecycle.origination_date` index setting is configured, in which +case the `min_age` will be the time elapsed since that specified date. If the +index is rolled over, then `min_age` is the time elapsed from the time the +index is rolled over. The intention here is to execute following phases and +actions relative to when data was written last to a rolled over index. The previous phase's actions must complete before {ilm} will check `min_age` and transition into the next phase. By default, {ilm} checks for indices that meet @@ -151,7 +152,7 @@ is invalid. In this example, the index's number of replicas is changed to `2`, while allocation rules are unchanged. -[source,js] +[source,console] -------------------------------------------------- PUT _ilm/policy/my_policy { @@ -168,13 +169,12 @@ PUT _ilm/policy/my_policy } } -------------------------------------------------- -// CONSOLE ===== Example: Assign index to node with specific "box_type" attribute This example assigns the index to nodes with `box_type` attribute of "hot" or "warm". -[source,js] +[source,console] -------------------------------------------------- PUT _ilm/policy/my_policy { @@ -193,14 +193,13 @@ PUT _ilm/policy/my_policy } } -------------------------------------------------- -// CONSOLE ===== Example: Assign index to a specific node and update replica settings This example updates the index to have one replica per shard and be allocated to nodes with a `box_type` attribute of "cold". -[source,js] +[source,console] -------------------------------------------------- PUT _ilm/policy/my_policy { @@ -220,7 +219,6 @@ PUT _ilm/policy/my_policy } } -------------------------------------------------- -// CONSOLE [[ilm-delete-action]] ==== Delete @@ -231,7 +229,7 @@ The Delete Action does just that, it deletes the index. This action does not have any options associated with it. -[source,js] +[source,console] -------------------------------------------------- PUT _ilm/policy/my_policy { @@ -246,7 +244,6 @@ PUT _ilm/policy/my_policy } } -------------------------------------------------- -// CONSOLE [[ilm-forcemerge-action]] ==== Force Merge @@ -270,7 +267,7 @@ most a specific number of <>. index, set it to `1` |====== -[source,js] +[source,console] -------------------------------------------------- PUT _ilm/policy/my_policy { @@ -287,7 +284,6 @@ PUT _ilm/policy/my_policy } } -------------------------------------------------- -// CONSOLE [[ilm-freeze-action]] ==== Freeze @@ -297,7 +293,7 @@ Phases allowed: cold. This action will <> the index by calling the <>. -[source,js] +[source,console] -------------------------------------------------- PUT _ilm/policy/my_policy { @@ -312,7 +308,6 @@ PUT _ilm/policy/my_policy } } -------------------------------------------------- -// CONSOLE [IMPORTANT] ================================ @@ -332,7 +327,7 @@ This action will set the index to be read-only This action does not have any options associated with it. 
-[source,js] +[source,console] -------------------------------------------------- PUT _ilm/policy/my_policy { @@ -347,7 +342,6 @@ PUT _ilm/policy/my_policy } } -------------------------------------------------- -// CONSOLE [[ilm-rollover-action]] ==== Rollover @@ -371,7 +365,7 @@ For example, if an index to be managed has an alias `my_data`. The managed index "my_index" must be the write index for the alias. For more information, read <>. -[source,js] +[source,console] -------------------------------------------------- PUT my_index { @@ -386,7 +380,6 @@ PUT my_index } } -------------------------------------------------- -// CONSOLE The Rollover Action rolls an alias over to a new index when the existing index meets one of the rollover conditions. @@ -416,7 +409,7 @@ three are required to be specified. This example rolls the index over when it is at least 100 gigabytes. -[source,js] +[source,console] -------------------------------------------------- PUT _ilm/policy/my_policy { @@ -433,14 +426,13 @@ PUT _ilm/policy/my_policy } } -------------------------------------------------- -// CONSOLE ===== Example: Rollover when index has too many documents This example rolls the index over when it contains at least 100000000 documents. -[source,js] +[source,console] -------------------------------------------------- PUT _ilm/policy/my_policy { @@ -457,14 +449,13 @@ PUT _ilm/policy/my_policy } } -------------------------------------------------- -// CONSOLE ===== Example: Rollover when index is too old This example rolls the index over when it has been created at least 7 days ago. -[source,js] +[source,console] -------------------------------------------------- PUT _ilm/policy/my_policy { @@ -481,7 +472,6 @@ PUT _ilm/policy/my_policy } } -------------------------------------------------- -// CONSOLE ===== Example: Rollover when index is too old or too large @@ -489,7 +479,7 @@ This example rolls the index over when it has been created at least 7 days ago or it is at least 100 gigabytes. In this case, the index will be rolled over when any of the conditions is met. -[source,js] +[source,console] -------------------------------------------------- PUT _ilm/policy/my_policy { @@ -507,8 +497,6 @@ PUT _ilm/policy/my_policy } } -------------------------------------------------- -// CONSOLE - ===== Example: Rollover condition stalls phase transition @@ -516,7 +504,7 @@ The Rollover action will only complete once one of its conditions is met. This means that any proceeding phases will be blocked until Rollover succeeds. -[source,js] +[source,console] -------------------------------------------------- PUT /_ilm/policy/rollover_policy { @@ -539,7 +527,6 @@ PUT /_ilm/policy/rollover_policy } } -------------------------------------------------- -// CONSOLE The above example illustrates a policy which attempts to delete an index one day after the index has been rolled over. It does not @@ -568,7 +555,7 @@ Indicies that don't set this value have an implicit default priority of 1. |====== -[source,js] +[source,console] -------------------------------------------------- PUT _ilm/policy/my_policy { @@ -585,7 +572,6 @@ PUT _ilm/policy/my_policy } } -------------------------------------------------- -// CONSOLE [[ilm-shrink-action]] ==== Shrink @@ -621,7 +607,7 @@ then the new index will be named "shrink-logs". source index. 
|====== -[source,js] +[source,console] -------------------------------------------------- PUT _ilm/policy/my_policy { @@ -638,7 +624,6 @@ PUT _ilm/policy/my_policy } } -------------------------------------------------- -// CONSOLE [[ilm-unfollow-action]] ==== Unfollow @@ -677,7 +662,7 @@ if it encounters a non follower index, then the unfollow action leaves that index untouched and lets the next action operate on this index. -[source,js] +[source,console] -------------------------------------------------- PUT _ilm/policy/my_policy { @@ -692,7 +677,6 @@ PUT _ilm/policy/my_policy } } -------------------------------------------------- -// CONSOLE === Full Policy @@ -703,7 +687,7 @@ and increases the replicas to 2, force merges and shrinks. After 60 days it enters the cold phase and allocates to "cold" nodes, and after 90 days the index is deleted. -[source,js] +[source,console] -------------------------------------------------- PUT _ilm/policy/full_policy { @@ -751,4 +735,3 @@ PUT _ilm/policy/full_policy } } -------------------------------------------------- -// CONSOLE diff --git a/docs/reference/ilm/set-up-lifecycle-policy.asciidoc b/docs/reference/ilm/set-up-lifecycle-policy.asciidoc index 7af686238f3..83394de577c 100644 --- a/docs/reference/ilm/set-up-lifecycle-policy.asciidoc +++ b/docs/reference/ilm/set-up-lifecycle-policy.asciidoc @@ -8,7 +8,7 @@ first define a lifecycle policy for it to use. The following request creates a policy called `my_policy` in Elasticsearch which we can later use to manage our indexes. -[source,js] +[source,console] ------------------------ PUT _ilm/policy/my_policy { @@ -31,7 +31,7 @@ PUT _ilm/policy/my_policy } } ------------------------ -// CONSOLE + <1> Rollover the index when it reaches 25GB in size <2> Delete the index when its 30 days old @@ -50,7 +50,7 @@ To set the policy for an index there are two options: The `index.lifecycle.name` setting can be set in an index template so that it is automatically applied to indexes matching the templates index pattern: -[source,js] +[source,console] ----------------------- PUT _template/my_template { @@ -63,7 +63,7 @@ PUT _template/my_template } } ----------------------- -// CONSOLE + <1> This template will be applied to all indexes which have a name starting with `test-` <2> The template will set the policy to be used to `my_policy` @@ -71,7 +71,7 @@ with `test-` Now that a policy exists and is used in an index template we can create an initial index which will be managed by our policy: -[source,js] +[source,console] ----------------------- PUT test-000001 { @@ -82,7 +82,7 @@ PUT test-000001 } } ----------------------- -// CONSOLE + <1> Set this initial index to be the write index for this alias. We can now write data to the `test-alias` alias. Because we have a rollover @@ -94,7 +94,7 @@ create a new index and roll the alias over to use the new index automatically. The `index.lifecycle.name` setting can be set on an individual create index request so {ilm} immediately starts managing the index: -[source,js] +[source,console] ----------------------- PUT test-index { @@ -105,7 +105,7 @@ PUT test-index } } ----------------------- -// CONSOLE + IMPORTANT: Its recommended not to use the create index API with a policy that defines a rollover action. 
If you do so, the new index as the result of the diff --git a/docs/reference/ilm/start-stop-ilm.asciidoc b/docs/reference/ilm/start-stop-ilm.asciidoc index 6e2629b60ea..fcbbbcd8115 100644 --- a/docs/reference/ilm/start-stop-ilm.asciidoc +++ b/docs/reference/ilm/start-stop-ilm.asciidoc @@ -19,7 +19,7 @@ To see the current operating status of ILM, use the <>. -[source,js] +[source,console] -------------------------------------------------- POST _ilm/stop -------------------------------------------------- -// CONSOLE // TEST[continued] When stopped, all further policy actions will be halted. This will be reflected in the Status API //// -[source,js] +[source,console] -------------------------------------------------- GET _ilm/status -------------------------------------------------- -// CONSOLE // TEST[continued] //// @@ -111,19 +107,19 @@ GET _ilm/status "operation_mode": "STOPPING" } -------------------------------------------------- +// TESTRESPONSE[s/"STOPPING"/$body.operation_mode/] The ILM service will then, asynchronously, run all policies to a point where it is safe to stop. After ILM verifies that it is safe, it will move to the `STOPPED` mode. //// -[source,js] +[source,console] -------------------------------------------------- PUT trigger_ilm_cs_action GET _ilm/status -------------------------------------------------- -// CONSOLE // TEST[continued] //// @@ -133,6 +129,7 @@ GET _ilm/status "operation_mode": "STOPPED" } -------------------------------------------------- +// TESTRESPONSE[s/"STOPPED"/$body.operation_mode/] [float] === Starting ILM @@ -140,19 +137,17 @@ GET _ilm/status To start ILM and continue executing policies, use the <>. -[source,js] +[source,console] -------------------------------------------------- POST _ilm/start -------------------------------------------------- -// CONSOLE // TEST[continued] //// -[source,js] +[source,console] -------------------------------------------------- GET _ilm/status -------------------------------------------------- -// CONSOLE // TEST[continued] //// diff --git a/docs/reference/ilm/update-lifecycle-policy.asciidoc b/docs/reference/ilm/update-lifecycle-policy.asciidoc index 5a0034a2a60..e29967b60ec 100644 --- a/docs/reference/ilm/update-lifecycle-policy.asciidoc +++ b/docs/reference/ilm/update-lifecycle-policy.asciidoc @@ -25,7 +25,7 @@ If an index is assigned to the policy, it will be assigned the latest version of To show this, let's create a policy `my_policy`. -[source,js] +[source,console] ------------------------ PUT _ilm/policy/my_policy { @@ -48,7 +48,6 @@ PUT _ilm/policy/my_policy } } ------------------------ -// CONSOLE This newly defined policy will be created and assigned to have a version equal to 1. Since we haven't assigned any indices to this policy, any updates that @@ -57,7 +56,7 @@ by this policy. Updating the Delete phase's minimum age can be done in an update request. -[source,js] +[source,console] ------------------------ PUT _ilm/policy/my_policy { @@ -80,23 +79,22 @@ PUT _ilm/policy/my_policy } } ------------------------ -// CONSOLE // TEST[continued] + <1> update `min_age` to 10 days ////////// -[source,js] +[source,console] -------------------------------------------------- GET _ilm/policy/my_policy -------------------------------------------------- -// CONSOLE // TEST[continued] ////////// When we get the policy, we will see it reflect our latest changes, but with its version bumped to 2. 
-[source,js] +[source,console-result] -------------------------------------------------- { "my_policy": { @@ -123,8 +121,8 @@ with its version bumped to 2. } } -------------------------------------------------- -// CONSOLE // TESTRESPONSE[s/"modified_date": 82392349/"modified_date": $body.my_policy.modified_date/] + <1> The updated version value <2> The timestamp when this policy was updated last. @@ -140,7 +138,7 @@ indices that are currently executing the corresponding `hot` phase. Let's say we have an index `my_index` managed by the below `my_executing_policy` definition. -[source,js] +[source,console] ------------------------ PUT _ilm/policy/my_executing_policy { @@ -163,10 +161,9 @@ PUT _ilm/policy/my_executing_policy } } ------------------------ -// CONSOLE //// -[source,js] +[source,console] ------------------------ PUT my_index { @@ -175,21 +172,19 @@ PUT my_index } } ------------------------ -// CONSOLE // TEST[continued] //// The <> is useful to introspect managed indices to see which phase definition they are currently executing. Using this API, we can find out that `my_index` is currently checking if it is ready to be rolled over. -[source,js] +[source,console] -------------------------------------------------- GET my_index/_ilm/explain -------------------------------------------------- -// CONSOLE // TEST[continued] -[source,js] +[source,console-result] -------------------------------------------------- { "indices": { @@ -222,12 +217,11 @@ GET my_index/_ilm/explain } } -------------------------------------------------- -// CONSOLE // TESTRESPONSE[skip:no way to know if we will get this response immediately] We can update `my_executing_policy` to enter the hot phase after one day. -[source,js] +[source,console] ------------------------ PUT _ilm/policy/my_executing_policy { @@ -251,23 +245,22 @@ PUT _ilm/policy/my_executing_policy } } ------------------------ -// CONSOLE // TEST[continued] + <1> updated `min_age` from "0ms" to "1d" The index `my_index` has already entered the hot phase, so it will still use version 1 of the policy until it completes the hot phase. //// -[source,js] +[source,console] -------------------------------------------------- GET my_index/_ilm/explain -------------------------------------------------- -// CONSOLE // TEST[continued] //// -[source,js] +[source,console-result] -------------------------------------------------- { "indices": { @@ -300,14 +293,14 @@ GET my_index/_ilm/explain } } -------------------------------------------------- -// CONSOLE // TESTRESPONSE[skip:no way to know if we will get this response immediately] + <1> the version of the policy used for executing the hot phase We can also update `my_executing_policy` to have no rollover action and, instead, go directly into a newly introduced `warm` phase. -[source,js] +[source,console] ------------------------ PUT _ilm/policy/my_executing_policy { @@ -331,7 +324,6 @@ PUT _ilm/policy/my_executing_policy } } ------------------------ -// CONSOLE // TEST[continued] Now, version 3 of this policy has no `hot` phase, but if we run the @@ -339,15 +331,14 @@ Explain API again, we will see that nothing has changed. The index `my_index` is still executing version 1 of the policy. 
//// -[source,js] +[source,console] -------------------------------------------------- GET my_index/_ilm/explain -------------------------------------------------- -// CONSOLE // TEST[continued] //// -[source,js] +[source,console-result] -------------------------------------------------- { "indices": { @@ -380,8 +371,8 @@ GET my_index/_ilm/explain } } -------------------------------------------------- -// CONSOLE // TESTRESPONSE[skip:no way to know if we will get this response immediately] + <1> the version of the policy used for executing the hot phase After indexing one document into `my_index` so that rollover succeeds and @@ -389,7 +380,7 @@ moves onto the next phase, we will notice something new. The index will move into the next phase in the updated version 3 of its policy. //// -[source,js] +[source,console] -------------------------------------------------- PUT my_index/_doc/1 { @@ -398,11 +389,10 @@ PUT my_index/_doc/1 GET my_index/_ilm/explain -------------------------------------------------- -// CONSOLE // TEST[continued] //// -[source,js] +[source,console-result] -------------------------------------------------- { "indices": { @@ -435,8 +425,8 @@ GET my_index/_ilm/explain } } -------------------------------------------------- -// CONSOLE // TESTRESPONSE[skip:There is no way to force the index to move to the next step in a timely manner] + <1> The index has moved to using version 3 of the policy `my_index` will move to the next phase in the latest policy definition, which is the newly added `warm` phase. @@ -453,7 +443,7 @@ it will move on to the next phase in `my_other_policy`. So if it was on the `hot` phase before, it will move to the `delete` phase after the `hot` phase concluded. //// -[source,js] +[source,console] ------------------------ PUT _ilm/policy/my_policy { @@ -497,18 +487,16 @@ PUT my_index } } ------------------------ -// CONSOLE //// -[source,js] +[source,console] -------------------------------------------------- PUT my_index/_settings { "lifecycle.name": "my_other_policy" } -------------------------------------------------- -// CONSOLE // TEST[continued] The change to the new policy will not happen immediately. The currently executing phase diff --git a/docs/reference/ilm/using-policies-rollover.asciidoc b/docs/reference/ilm/using-policies-rollover.asciidoc index 11be1bed85a..04b084a0e8e 100644 --- a/docs/reference/ilm/using-policies-rollover.asciidoc +++ b/docs/reference/ilm/using-policies-rollover.asciidoc @@ -55,7 +55,7 @@ criteria specified for a phase is relative to the rollover time for indices. In this example, that means the index will be deleted 30 days after rollover, not 30 days from when the index was created. -[source,js] +[source,console] -------------------------------------------------- PUT /_ilm/policy/my_policy { @@ -78,13 +78,12 @@ PUT /_ilm/policy/my_policy } } -------------------------------------------------- -// CONSOLE To use an {ilm} policy, you need to specify it in the index template used to create the indices. For example, the following template associates `my_policy` with indices created from the template `my_template`. 
-[source,js] +[source,console] ----------------------- PUT _template/my_template { @@ -97,7 +96,7 @@ PUT _template/my_template } } ----------------------- -// CONSOLE + <1> Template applies to all indices with the prefix test- <2> Associates my_policy with all indices created with this template <3> Rolls over the write alias test when the rollover action is triggered @@ -105,7 +104,7 @@ PUT _template/my_template To be able to start using the policy for these `test-*` indexes we need to bootstrap the process by creating the first index. -[source,js] +[source,console] ----------------------- PUT test-000001 <1> { @@ -116,7 +115,7 @@ PUT test-000001 <1> } } ----------------------- -// CONSOLE + <1> Creates the index called test-000001. The rollover action increments the suffix number for each subsequent index. <2> Designates this index as the write index for this alias. diff --git a/docs/reference/images/sql/client-apps/squirell-1-view-drivers.png b/docs/reference/images/sql/client-apps/squirell-1-view-drivers.png index 22abbbed741..b5ca1c95126 100644 Binary files a/docs/reference/images/sql/client-apps/squirell-1-view-drivers.png and b/docs/reference/images/sql/client-apps/squirell-1-view-drivers.png differ diff --git a/docs/reference/images/sql/client-apps/squirell-2-new-driver.png b/docs/reference/images/sql/client-apps/squirell-2-new-driver.png deleted file mode 100644 index 756b308a6a3..00000000000 Binary files a/docs/reference/images/sql/client-apps/squirell-2-new-driver.png and /dev/null differ diff --git a/docs/reference/images/sql/client-apps/squirell-2-select-driver.png b/docs/reference/images/sql/client-apps/squirell-2-select-driver.png new file mode 100644 index 00000000000..7b55d938ce0 Binary files /dev/null and b/docs/reference/images/sql/client-apps/squirell-2-select-driver.png differ diff --git a/docs/reference/images/sql/client-apps/squirell-3-add-driver.png b/docs/reference/images/sql/client-apps/squirell-3-add-driver.png index 29f06b7033d..9b476f2bc19 100644 Binary files a/docs/reference/images/sql/client-apps/squirell-3-add-driver.png and b/docs/reference/images/sql/client-apps/squirell-3-add-driver.png differ diff --git a/docs/reference/images/sql/client-apps/squirell-4-driver-list.png b/docs/reference/images/sql/client-apps/squirell-4-driver-list.png index a269e29d672..990669f8bbf 100644 Binary files a/docs/reference/images/sql/client-apps/squirell-4-driver-list.png and b/docs/reference/images/sql/client-apps/squirell-4-driver-list.png differ diff --git a/docs/reference/images/sql/client-apps/squirell-5-add-alias.png b/docs/reference/images/sql/client-apps/squirell-5-add-alias.png index 1fc8e9ad601..a23e348f45c 100644 Binary files a/docs/reference/images/sql/client-apps/squirell-5-add-alias.png and b/docs/reference/images/sql/client-apps/squirell-5-add-alias.png differ diff --git a/docs/reference/images/sql/client-apps/squirell-7-data.png b/docs/reference/images/sql/client-apps/squirell-7-data.png index 70837963b74..ccfcd2593bb 100644 Binary files a/docs/reference/images/sql/client-apps/squirell-7-data.png and b/docs/reference/images/sql/client-apps/squirell-7-data.png differ diff --git a/docs/reference/index-modules.asciidoc b/docs/reference/index-modules.asciidoc index 93da06d9b96..9ae1dac826e 100644 --- a/docs/reference/index-modules.asciidoc +++ b/docs/reference/index-modules.asciidoc @@ -234,13 +234,20 @@ specific index module: The length of time that a <> remains available for <>. Defaults to `60s`. 
- `index.default_pipeline`:: + `index.default_pipeline`:: The default <> pipeline for this index. Index requests will fail if the default pipeline is set and the pipeline does not exist. The default may be overridden using the `pipeline` parameter. The special pipeline name `_none` indicates no ingest pipeline should be run. + `index.required_pipeline`:: + The required <> pipeline for this index. Index requests + will fail if the required pipeline is set and the pipeline does not exist. + The required pipeline can not be overridden with the `pipeline` parameter. A + default pipeline and a required pipeline can not both be set. The special + pipeline name `_none` indicates no ingest pipeline will run. + [float] === Settings in other index modules diff --git a/docs/reference/index-modules/allocation/delayed.asciidoc b/docs/reference/index-modules/allocation/delayed.asciidoc index c6626a9dec3..fb8be743e60 100644 --- a/docs/reference/index-modules/allocation/delayed.asciidoc +++ b/docs/reference/index-modules/allocation/delayed.asciidoc @@ -36,7 +36,7 @@ dynamic setting, which defaults to `1m`. This setting can be updated on a live index (or on all indices): -[source,js] +[source,console] ------------------------------ PUT _all/_settings { @@ -45,7 +45,6 @@ PUT _all/_settings } } ------------------------------ -// CONSOLE // TEST[s/^/PUT test\n/] With delayed allocation enabled, the above scenario changes to look like this: @@ -79,11 +78,11 @@ relocation begins, cancelling recovery in favour of the synced shard is cheap. The number of shards whose allocation has been delayed by this timeout setting can be viewed with the <>: -[source,js] +[source,console] ------------------------------ GET _cluster/health <1> ------------------------------ -// CONSOLE + <1> This request will return a `delayed_unassigned_shards` value. ==== Removing a node permanently @@ -92,7 +91,7 @@ If a node is not going to return and you would like Elasticsearch to allocate the missing shards immediately, just update the timeout to zero: -[source,js] +[source,console] ------------------------------ PUT _all/_settings { @@ -101,7 +100,6 @@ PUT _all/_settings } } ------------------------------ -// CONSOLE // TEST[s/^/PUT test\n/] You can reset the timeout as soon as the missing shards have started to recover. diff --git a/docs/reference/index.asciidoc b/docs/reference/index.asciidoc index ac982c61657..8eaac30aa50 100644 --- a/docs/reference/index.asciidoc +++ b/docs/reference/index.asciidoc @@ -20,8 +20,6 @@ include::setup/setup-xes.asciidoc[] include::monitoring/configuring-monitoring.asciidoc[] -include::{xes-repo-dir}/security/configuring-es.asciidoc[] - include::setup/setup-xclient.asciidoc[] include::setup/bootstrap-checks-xes.asciidoc[] @@ -56,7 +54,9 @@ include::rollup/index.asciidoc[] include::frozen-indices.asciidoc[] -include::administering.asciidoc[] +include::high-availability.asciidoc[] + +include::security/index.asciidoc[] include::commands/index.asciidoc[] diff --git a/docs/reference/indices.asciidoc b/docs/reference/indices.asciidoc index 20b1b77d5fd..3fd464027a4 100644 --- a/docs/reference/indices.asciidoc +++ b/docs/reference/indices.asciidoc @@ -69,6 +69,7 @@ index settings, aliases, mappings, and index templates. 
* <> * <> * <> +* <> * <> include::indices/create-index.asciidoc[] @@ -139,6 +140,8 @@ include::indices/clearcache.asciidoc[] include::indices/flush.asciidoc[] +include::indices/synced-flush.asciidoc[] + include::indices/refresh.asciidoc[] include::indices/forcemerge.asciidoc[] diff --git a/docs/reference/indices/add-alias.asciidoc b/docs/reference/indices/add-alias.asciidoc index 427ddf631ce..c1f75bdb2dd 100644 --- a/docs/reference/indices/add-alias.asciidoc +++ b/docs/reference/indices/add-alias.asciidoc @@ -6,7 +6,7 @@ Creates or updates an index alias. -include::{docdir}/glossary.asciidoc[tag=index-alias-def] +include::{docdir}/glossary.asciidoc[tag=index-alias-desc] [source,console] ---- diff --git a/docs/reference/indices/alias-exists.asciidoc b/docs/reference/indices/alias-exists.asciidoc index 8caa3080fee..ed38e44edc2 100644 --- a/docs/reference/indices/alias-exists.asciidoc +++ b/docs/reference/indices/alias-exists.asciidoc @@ -6,7 +6,7 @@ Checks if an index alias exists. -include::{docdir}/glossary.asciidoc[tag=index-alias-def] +include::{docdir}/glossary.asciidoc[tag=index-alias-desc] [source,console] ---- diff --git a/docs/reference/indices/aliases.asciidoc b/docs/reference/indices/aliases.asciidoc index 991b65b0793..1172f9cf4d1 100644 --- a/docs/reference/indices/aliases.asciidoc +++ b/docs/reference/indices/aliases.asciidoc @@ -6,7 +6,7 @@ Adds or removes index aliases. -include::{docdir}/glossary.asciidoc[tag=index-alias-def] +include::{docdir}/glossary.asciidoc[tag=index-alias-desc] [source,console] ---- diff --git a/docs/reference/indices/delete-alias.asciidoc b/docs/reference/indices/delete-alias.asciidoc index 4ab144a2fd6..3a3eed4b80a 100644 --- a/docs/reference/indices/delete-alias.asciidoc +++ b/docs/reference/indices/delete-alias.asciidoc @@ -6,7 +6,7 @@ Deletes an existing index alias. -include::{docdir}/glossary.asciidoc[tag=index-alias-def] +include::{docdir}/glossary.asciidoc[tag=index-alias-desc] [source,console] ---- diff --git a/docs/reference/indices/flush.asciidoc b/docs/reference/indices/flush.asciidoc index b8a9f43f95f..92054866862 100644 --- a/docs/reference/indices/flush.asciidoc +++ b/docs/reference/indices/flush.asciidoc @@ -60,201 +60,9 @@ POST _flush -------------------------------------------------- // TEST[s/^/PUT kimchy\nPUT elasticsearch\n/] + +[float] [[synced-flush-api]] ==== Synced Flush -{es} keeps track of which shards have received indexing activity recently, and -considers shards that have not received any indexing operations for 5 minutes to -be inactive. When a shard becomes inactive {es} performs a special kind of flush -known as a _synced flush_. A synced flush performs a normal -<> on each copy of the shard, and then adds a marker known -as the `sync_id` to each copy to indicate that these copies have identical -Lucene indices. Comparing the `sync_id` markers of the two copies is a very -efficient way to check whether they have identical contents. - -When allocating shard copies, {es} must ensure that each replica contains the -same data as the primary. If the shard copies have been synced-flushed and the -replica shares a `sync_id` with the primary then {es} knows that the two copies -have identical contents. This means there is no need to copy any segment files -from the primary to the replica, which saves a good deal of time during -recoveries and restarts. - -This is particularly useful for clusters having lots of indices which are very -rarely updated, such as with time-based indices. 
Without the synced flush -marker, recovery of this kind of cluster would be much slower. - -To check whether a shard has a `sync_id` marker or not, look for the `commit` -section of the shard stats returned by the <> API: - -[source,console] --------------------------------------------------- -GET twitter/_stats?filter_path=**.commit&level=shards <1> --------------------------------------------------- -// TEST[s/^/PUT twitter\nPOST twitter\/_flush\/synced\n/] -<1> `filter_path` is used to reduce the verbosity of the response, but is entirely optional - - -which returns something similar to: - -[source,console-result] --------------------------------------------------- -{ - "indices": { - "twitter": { - "shards": { - "0": [ - { - "commit" : { - "id" : "3M3zkw2GHMo2Y4h4/KFKCg==", - "generation" : 3, - "user_data" : { - "translog_uuid" : "hnOG3xFcTDeoI_kvvvOdNA", - "history_uuid" : "XP7KDJGiS1a2fHYiFL5TXQ", - "local_checkpoint" : "-1", - "translog_generation" : "2", - "max_seq_no" : "-1", - "sync_id" : "AVvFY-071siAOuFGEO9P", <1> - "max_unsafe_auto_id_timestamp" : "-1", - "min_retained_seq_no" : "0" - }, - "num_docs" : 0 - } - } - ] - } - } - } -} --------------------------------------------------- -// TESTRESPONSE[s/"id" : "3M3zkw2GHMo2Y4h4\/KFKCg=="/"id": $body.indices.twitter.shards.0.0.commit.id/] -// TESTRESPONSE[s/"translog_uuid" : "hnOG3xFcTDeoI_kvvvOdNA"/"translog_uuid": $body.indices.twitter.shards.0.0.commit.user_data.translog_uuid/] -// TESTRESPONSE[s/"history_uuid" : "XP7KDJGiS1a2fHYiFL5TXQ"/"history_uuid": $body.indices.twitter.shards.0.0.commit.user_data.history_uuid/] -// TESTRESPONSE[s/"sync_id" : "AVvFY-071siAOuFGEO9P"/"sync_id": $body.indices.twitter.shards.0.0.commit.user_data.sync_id/] -<1> the `sync id` marker - -NOTE: The `sync_id` marker is removed as soon as the shard is flushed again, and -{es} may trigger an automatic flush of a shard at any time if there are -unflushed operations in the shard's translog. In practice this means that one -should consider any indexing operation on an index as having removed its -`sync_id` markers. - -[float] -==== Synced Flush API - -The Synced Flush API allows an administrator to initiate a synced flush -manually. This can be particularly useful for a planned cluster restart where -you can stop indexing but don't want to wait for 5 minutes until all indices -are marked as inactive and automatically sync-flushed. - -You can request a synced flush even if there is ongoing indexing activity, and -{es} will perform the synced flush on a "best-effort" basis: shards that do not -have any ongoing indexing activity will be successfully sync-flushed, and other -shards will fail to sync-flush. The successfully sync-flushed shards will have -faster recovery times as long as the `sync_id` marker is not removed by a -subsequent flush. - -[source,console] --------------------------------------------------- -POST twitter/_flush/synced --------------------------------------------------- -// TEST[setup:twitter] - -The response contains details about how many shards were successfully -sync-flushed and information about any failure. 
- -Here is what it looks like when all shards of a two shards and one replica -index successfully sync-flushed: - -[source,console-result] --------------------------------------------------- -{ - "_shards": { - "total": 2, - "successful": 2, - "failed": 0 - }, - "twitter": { - "total": 2, - "successful": 2, - "failed": 0 - } -} --------------------------------------------------- -// TESTRESPONSE[s/"successful": 2/"successful": 1/] - -Here is what it looks like when one shard group failed due to pending -operations: - -[source,js] --------------------------------------------------- -{ - "_shards": { - "total": 4, - "successful": 2, - "failed": 2 - }, - "twitter": { - "total": 4, - "successful": 2, - "failed": 2, - "failures": [ - { - "shard": 1, - "reason": "[2] ongoing operations on primary" - } - ] - } -} --------------------------------------------------- -// NOTCONSOLE - -NOTE: The above error is shown when the synced flush fails due to concurrent -indexing operations. The HTTP status code in that case will be `409 Conflict`. - -Sometimes the failures are specific to a shard copy. The copies that failed -will not be eligible for fast recovery but those that succeeded still will be. -This case is reported as follows: - -[source,js] --------------------------------------------------- -{ - "_shards": { - "total": 4, - "successful": 1, - "failed": 1 - }, - "twitter": { - "total": 4, - "successful": 3, - "failed": 1, - "failures": [ - { - "shard": 1, - "reason": "unexpected error", - "routing": { - "state": "STARTED", - "primary": false, - "node": "SZNr2J_ORxKTLUCydGX4zA", - "relocating_node": null, - "shard": 1, - "index": "twitter" - } - } - ] - } -} --------------------------------------------------- -// NOTCONSOLE - -NOTE: When a shard copy fails to sync-flush, the HTTP status code returned will -be `409 Conflict`. - -The synced flush API can be applied to more than one index with a single call, -or even on `_all` the indices. - -[source,console] --------------------------------------------------- -POST kimchy,elasticsearch/_flush/synced - -POST _flush/synced --------------------------------------------------- +See <>. diff --git a/docs/reference/indices/forcemerge.asciidoc b/docs/reference/indices/forcemerge.asciidoc index 57c4796964f..520f29732a7 100644 --- a/docs/reference/indices/forcemerge.asciidoc +++ b/docs/reference/indices/forcemerge.asciidoc @@ -1,7 +1,30 @@ [[indices-forcemerge]] -=== Force Merge +=== Force merge API +++++ +Force merge +++++ -The force merge API allows you to force a <> on the +Forces a <> on the shards of one or more indices. + +[source,console] +---- +POST /twitter/_forcemerge +---- +// TEST[setup:twitter] + + +[[forcemerge-api-request]] +==== {api-request-title} + +`POST //_forcemerge` + +`POST /_forcemerge` + + +[[forcemerge-api-desc]] +==== {api-description-title} + +Use the force merge API to force a <> on the shards of one or more indices. Merging reduces the number of segments in each shard by merging some of them together, and also frees up the space used by deleted documents. Merging normally happens automatically, but sometimes it is @@ -15,21 +38,128 @@ mostly consist of deleted documents. This can cause very large segments to remain in the index which can result in increased disk usage and worse search performance. + +[[forcemerge-blocks]] +===== Blocks during a force merge + Calls to this API block until the merge is complete. If the client connection is lost before completion then the force merge process will continue in the background. 
Any new requests to force merge the same indices will also block until the ongoing force merge is complete. -[source,console] --------------------------------------------------- -POST /twitter/_forcemerge --------------------------------------------------- -// TEST[setup:twitter] -Force-merging can be useful with time-based indices and when using -<>. In these cases each index only receives -indexing traffic for a certain period of time, and once an index will receive -no more writes its shards can be force-merged down to a single segment: +[[forcemerge-multi-index]] +===== Force merging multiple indices + +The force merge API can be applied to more than one index with a single call, or +even on `_all` the indices. Multi index operations are executed one shard at a +time per node. Force merge makes the storage for the shard being merged +temporarily increase, up to double its size in case `max_num_segments` parameter +is set to `1`, as all segments need to be rewritten into a new one. + + +[[forcemerge-api-path-params]] +==== {api-path-parms-title} + +include::{docdir}/rest-api/common-parms.asciidoc[tag=index] ++ +To force merge all indices in the cluster, +omit this parameter +or use a value of `_all` or `*`. + + +[[forcemerge-api-query-params]] +==== {api-query-parms-title} + +include::{docdir}/rest-api/common-parms.asciidoc[tag=allow-no-indices] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=expand-wildcards] ++ +Defaults to `open`. + +`flush`:: +(Optional, boolean) +If `true`, +{es} performs a <> on the indices +after the force merge. +Defaults to `true`. + +include::{docdir}/rest-api/common-parms.asciidoc[tag=index-ignore-unavailable] + +`max_num_segments`:: ++ +-- +(Optional, integer) +The number of segments to merge to. +To fully merge the index, +set it to `1`. + +Defaults to checking if a merge needs to execute. +If so, executes it. +-- + +`only_expunge_deletes`:: ++ +-- +(Optional, boolean) +If `true`, +only expunge segments containing document deletions. +Defaults to `false`. + +In Lucene, +a document is not deleted from a segment; +just marked as deleted. +During a merge, +a new segment is created +that does not contain those document deletions. + +NOTE: This parameter does *not* override the +`index.merge.policy.expunge_deletes_allowed` setting. +-- + + +[[forcemerge-api-example]] +==== {api-examples-title} + + +[[forcemerge-api-specific-ex]] +===== Force merge a specific index + +[source,console] +---- +POST /twitter/_forcemerge +---- +// TEST[continued] + + +[[forcemerge-api-multiple-ex]] +===== Force merge several indices + +[source,console] +---- +POST /kimchy,elasticsearch/_forcemerge +---- +// TEST[s/^/PUT kimchy\nPUT elasticsearch\n/] + + +[[forcemerge-api-all-ex]] +===== Force merge all indices + +[source,console] +---- +POST /_forcemerge +---- + + +[[forcemerge-api-time-based-index-ex]] +===== Time-based indices + +Force-merging is useful for time-based indices, +particularly when using <>. +In these cases, +each index only receives indexing traffic for a certain period of time. +Once an index receive no more writes, +its shards can be force-merged to a single segment. [source,console] -------------------------------------------------- @@ -40,49 +170,3 @@ POST /logs-000001/_forcemerge?max_num_segments=1 This can be a good idea because single-segment shards can sometimes use simpler and more efficient data structures to perform searches. 
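If the goal is only to reclaim space from deleted documents rather than to fully merge an index, the `only_expunge_deletes` query parameter described above can be used on the same endpoint. A small sketch, assuming a hypothetical index named `my-index` whose segments contain many deleted documents:

[source,console]
--------------------------------------------------
POST /my-index/_forcemerge?only_expunge_deletes=true
--------------------------------------------------

Only segments whose proportion of deleted documents exceeds `index.merge.policy.expunge_deletes_allowed` are rewritten, so this is usually cheaper than merging down to a single segment.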
- -[float] -[[forcemerge-parameters]] -==== Request Parameters - -The force merge API accepts the following request parameters: - -[horizontal] -`max_num_segments`:: The number of segments to merge to. To fully -merge the index, set it to `1`. Defaults to simply checking if a -merge needs to execute, and if so, executes it. - -`only_expunge_deletes`:: Should the merge process only expunge segments with -deletes in it. In Lucene, a document is not deleted from a segment, just marked -as deleted. During a merge process of segments, a new segment is created that -does not have those deletes. This flag allows to only merge segments that have -deletes. Defaults to `false`. Note that this won't override the -`index.merge.policy.expunge_deletes_allowed` threshold. - -`flush`:: Should a flush be performed after the forced merge. Defaults to -`true`. - -[source,console] --------------------------------------------------- -POST /kimchy/_forcemerge?only_expunge_deletes=false&max_num_segments=100&flush=true --------------------------------------------------- -// TEST[s/^/PUT kimchy\n/] - -[float] -[[forcemerge-multi-index]] -==== Multi Index - -The force merge API can be applied to more than one index with a single call, or -even on `_all` the indices. Multi index operations are executed one shard at a -time per node. Force merge makes the storage for the shard being merged -temporarily increase, up to double its size in case `max_num_segments` is set -to `1`, as all segments need to be rewritten into a new one. - - -[source,console] --------------------------------------------------- -POST /kimchy,elasticsearch/_forcemerge - -POST /_forcemerge --------------------------------------------------- -// TEST[s/^/PUT kimchy\nPUT elasticsearch\n/] diff --git a/docs/reference/indices/get-alias.asciidoc b/docs/reference/indices/get-alias.asciidoc index 8b25e3c21ac..0f2c4e88e56 100644 --- a/docs/reference/indices/get-alias.asciidoc +++ b/docs/reference/indices/get-alias.asciidoc @@ -6,7 +6,7 @@ Returns information about one or more index aliases. -include::{docdir}/glossary.asciidoc[tag=index-alias-def] +include::{docdir}/glossary.asciidoc[tag=index-alias-desc] [source,console] ---- diff --git a/docs/reference/indices/recovery.asciidoc b/docs/reference/indices/recovery.asciidoc index 9bebc2388f1..371039643bd 100644 --- a/docs/reference/indices/recovery.asciidoc +++ b/docs/reference/indices/recovery.asciidoc @@ -32,6 +32,7 @@ PUT /_snapshot/my_repository # snapshot the index PUT /_snapshot/my_repository/snap_1?wait_for_completion=true +{"indices": "index1"} # delete the index DELETE index1 diff --git a/docs/reference/indices/synced-flush.asciidoc b/docs/reference/indices/synced-flush.asciidoc new file mode 100644 index 00000000000..35d360496fe --- /dev/null +++ b/docs/reference/indices/synced-flush.asciidoc @@ -0,0 +1,281 @@ +[[indices-synced-flush-api]] +=== Synced flush API +++++ +Synced flush +++++ + +Performs a synced flush on one or more indices. + +[source,console] +-------------------------------------------------- +POST /twitter/_flush/synced +-------------------------------------------------- +// TEST[setup:twitter] + + +[[synced-flush-api-request]] +==== {api-request-title} + +`POST //flush/synced` + +`GET //flush/synced` + +`POST /flush/synced` + +`GET /flush/synced` + + +[[synced-flush-api-desc]] +==== {api-description-title} + +[[synced-flush-using-api]] +===== Use the synced flush API + +Use the synced flush API to manually initiate a synced flush. 
+This can be useful for a planned cluster restart where +you can stop indexing but don't want to wait for 5 minutes until all indices +are marked as inactive and automatically sync-flushed. + +You can request a synced flush even if there is ongoing indexing activity, and +{es} will perform the synced flush on a "best-effort" basis: shards that do not +have any ongoing indexing activity will be successfully sync-flushed, and other +shards will fail to sync-flush. The successfully sync-flushed shards will have +faster recovery times as long as the `sync_id` marker is not removed by a +subsequent flush. + + +[[synced-flush-overview]] +===== Synced flush overview + +{es} keeps track of which shards have received indexing activity recently, and +considers shards that have not received any indexing operations for 5 minutes to +be inactive. + +When a shard becomes inactive {es} performs a special kind of flush +known as a *synced flush*. A synced flush performs a normal +<> on each replica of the shard, and then adds a marker known +as the `sync_id` to each replica to indicate that these copies have identical +Lucene indices. Comparing the `sync_id` markers of the two copies is a very +efficient way to check whether they have identical contents. + +When allocating shard replicas, {es} must ensure that each replica contains the +same data as the primary. If the shard copies have been synced-flushed and the +replica shares a `sync_id` with the primary then {es} knows that the two copies +have identical contents. This means there is no need to copy any segment files +from the primary to the replica, which saves a good deal of time during +recoveries and restarts. + +This is particularly useful for clusters having lots of indices which are very +rarely updated, such as with time-based indices. Without the synced flush +marker, recovery of this kind of cluster would be much slower. 
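As a rough sketch of how this fits into a planned restart, you might stop indexing, optionally disable replica allocation, and then request a synced flush before shutting nodes down. The allocation setting shown here is an assumption borrowed from the usual restart procedure rather than a requirement of the synced flush API:

[source,console]
--------------------------------------------------
PUT /_cluster/settings
{
  "persistent": {
    "cluster.routing.allocation.enable": "primaries"
  }
}

POST /_flush/synced
--------------------------------------------------

After the nodes are back, re-enable allocation by resetting `cluster.routing.allocation.enable`; any shard copies that kept their `sync_id` markers can then skip copying segment files during recovery.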
+ + +[[synced-flush-sync-id-markers]] +===== Check for `sync_id` markers + +To check whether a shard has a `sync_id` marker or not, look for the `commit` +section of the shard stats returned by the <> API: + +[source,console] +-------------------------------------------------- +GET /twitter/_stats?filter_path=**.commit&level=shards <1> +-------------------------------------------------- +// TEST[s/^/PUT twitter\nPOST twitter\/_flush\/synced\n/] + +<1> `filter_path` is used to reduce the verbosity of the response, but is entirely optional + +The API returns the following response: + +[source,console-result] +-------------------------------------------------- +{ + "indices": { + "twitter": { + "shards": { + "0": [ + { + "commit" : { + "id" : "3M3zkw2GHMo2Y4h4/KFKCg==", + "generation" : 3, + "user_data" : { + "translog_uuid" : "hnOG3xFcTDeoI_kvvvOdNA", + "history_uuid" : "XP7KDJGiS1a2fHYiFL5TXQ", + "local_checkpoint" : "-1", + "translog_generation" : "2", + "max_seq_no" : "-1", + "sync_id" : "AVvFY-071siAOuFGEO9P", <1> + "max_unsafe_auto_id_timestamp" : "-1", + "min_retained_seq_no" : "0" + }, + "num_docs" : 0 + } + } + ] + } + } + } +} +-------------------------------------------------- +// TESTRESPONSE[s/"id" : "3M3zkw2GHMo2Y4h4\/KFKCg=="/"id": $body.indices.twitter.shards.0.0.commit.id/] +// TESTRESPONSE[s/"translog_uuid" : "hnOG3xFcTDeoI_kvvvOdNA"/"translog_uuid": $body.indices.twitter.shards.0.0.commit.user_data.translog_uuid/] +// TESTRESPONSE[s/"history_uuid" : "XP7KDJGiS1a2fHYiFL5TXQ"/"history_uuid": $body.indices.twitter.shards.0.0.commit.user_data.history_uuid/] +// TESTRESPONSE[s/"sync_id" : "AVvFY-071siAOuFGEO9P"/"sync_id": $body.indices.twitter.shards.0.0.commit.user_data.sync_id/] +<1> the `sync id` marker + +NOTE: The `sync_id` marker is removed as soon as the shard is flushed again, and +{es} may trigger an automatic flush of a shard at any time if there are +unflushed operations in the shard's translog. In practice this means that one +should consider any indexing operation on an index as having removed its +`sync_id` markers. + + +[[synced-flush-api-path-params]] +==== {api-path-parms-title} + +include::{docdir}/rest-api/common-parms.asciidoc[tag=index] ++ +To sync-flush all indices, +omit this parameter +or use a value of `_all` or `*`. + + +[[synced-flush-api-query-params]] +==== {api-query-parms-title} + +include::{docdir}/rest-api/common-parms.asciidoc[tag=allow-no-indices] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=expand-wildcards] ++ +Defaults to `open`. + +include::{docdir}/rest-api/common-parms.asciidoc[tag=index-ignore-unavailable] + + +[[synced-flush-api-response-codes]] +==== {api-response-codes-title} + +`200`:: +All shards successfully sync-flushed. + +`409`:: +A replica shard failed to sync-flush. 
+ + +[[synced-flush-api-example]] +==== {api-examples-title} + + +[[synced-flush-api-specific-ex]] +===== Sync-flush a specific index + +[source,console] +---- +POST /kimchy/_flush +---- +// TEST[s/^/PUT kimchy\n/] + + +[[synced-flush-api-multi-ex]] +===== Synch-flush several indices + +[source,console] +-------------------------------------------------- +POST /kimchy,elasticsearch/_flush/synced +-------------------------------------------------- +// TEST[s/^/PUT elasticsearch\n/] +// TEST[continued] + + +[[synced-flush-api-all-ex]] +===== Sync-flush all indices + +[source,console] +-------------------------------------------------- +POST /_flush/synced +-------------------------------------------------- +// TEST[setup:twitter] + +The response contains details about how many shards were successfully +sync-flushed and information about any failure. + +The following response indicates two shards +and one replica shard +successfully sync-flushed: + +[source,console-result] +-------------------------------------------------- +{ + "_shards": { + "total": 2, + "successful": 2, + "failed": 0 + }, + "twitter": { + "total": 2, + "successful": 2, + "failed": 0 + } +} +-------------------------------------------------- +// TESTRESPONSE[s/"successful": 2/"successful": 1/] + +The following response indicates one shard group failed +due to pending operations: + +[source,js] +-------------------------------------------------- +{ + "_shards": { + "total": 4, + "successful": 2, + "failed": 2 + }, + "twitter": { + "total": 4, + "successful": 2, + "failed": 2, + "failures": [ + { + "shard": 1, + "reason": "[2] ongoing operations on primary" + } + ] + } +} +-------------------------------------------------- +// NOTCONSOLE + +Sometimes the failures are specific to a shard replica. The copies that failed +will not be eligible for fast recovery but those that succeeded still will be. +This case is reported as follows: + +[source,js] +-------------------------------------------------- +{ + "_shards": { + "total": 4, + "successful": 1, + "failed": 1 + }, + "twitter": { + "total": 4, + "successful": 3, + "failed": 1, + "failures": [ + { + "shard": 1, + "reason": "unexpected error", + "routing": { + "state": "STARTED", + "primary": false, + "node": "SZNr2J_ORxKTLUCydGX4zA", + "relocating_node": null, + "shard": 1, + "index": "twitter" + } + } + ] + } +} +-------------------------------------------------- +// NOTCONSOLE diff --git a/docs/reference/indices/templates.asciidoc b/docs/reference/indices/templates.asciidoc index fc0cba1ce36..0f33b21d013 100644 --- a/docs/reference/indices/templates.asciidoc +++ b/docs/reference/indices/templates.asciidoc @@ -248,4 +248,3 @@ The API returns the following response: } } -------------------------------------------------- -// TESTRESPONSE diff --git a/docs/reference/indices/update-settings.asciidoc b/docs/reference/indices/update-settings.asciidoc index dd00cce7a55..71efd50c26e 100644 --- a/docs/reference/indices/update-settings.asciidoc +++ b/docs/reference/indices/update-settings.asciidoc @@ -53,14 +53,14 @@ Defaults to `false`. include::{docdir}/rest-api/common-parms.asciidoc[tag=timeoutparms] -[[sample-api-query-params]] -==== {api-query-parms-title} +[[update-index-settings-api-request-body]] +==== {api-request-body-title} `settings`:: (Optional, <>) Configuration options for the index. See <>. 
-[[sample-api-example]] +[[update-index-settings-api-example]] ==== {api-examples-title} [[reset-index-setting]] diff --git a/docs/reference/ingest/processors/user-agent.asciidoc b/docs/reference/ingest/processors/user-agent.asciidoc index f0fb103109d..3e93f02abda 100644 --- a/docs/reference/ingest/processors/user-agent.asciidoc +++ b/docs/reference/ingest/processors/user-agent.asciidoc @@ -85,3 +85,14 @@ In practice, it will make most sense for any custom regex file to be a variant o or a customised version. The default file included in `ingest-user-agent` is the `regexes.yaml` from uap-core: https://github.com/ua-parser/uap-core/blob/master/regexes.yaml + +[[ingest-user-agent-settings]] +===== Node Settings + +The `user_agent` processor supports the following setting: + +`ingest.user_agent.cache_size`:: + + The maximum number of results that should be cached. Defaults to `1000`. + +Note that these settings are node settings and apply to all `user_agent` processors, i.e. there is one cache for all defined `user_agent` processors. diff --git a/docs/reference/mapping/fields/field-names-field.asciidoc b/docs/reference/mapping/fields/field-names-field.asciidoc index 7f6689dc43a..5c03da1124f 100644 --- a/docs/reference/mapping/fields/field-names-field.asciidoc +++ b/docs/reference/mapping/fields/field-names-field.asciidoc @@ -14,7 +14,9 @@ be available but will not use the `_field_names` field. [[disable-field-names]] ==== Disabling `_field_names` -Disabling `_field_names` is often not necessary because it no longer +NOTE: Disabling `_field_names` has been deprecated and will be removed in a future major version. + +Disabling `_field_names` is usually not necessary because it no longer carries the index overhead it once did. If you have a lot of fields which have `doc_values` and `norms` disabled and you do not need to execute `exists` queries using those fields you might want to disable @@ -31,3 +33,4 @@ PUT tweets } } -------------------------------------------------- +// TEST[warning:Index [tweets] uses the deprecated `enabled` setting for `_field_names`. Disabling _field_names is not necessary because it no longer carries a large index overhead. Support for this setting will be removed in a future major version. Please remove it from your mappings and templates.] diff --git a/docs/reference/mapping/params/eager-global-ordinals.asciidoc b/docs/reference/mapping/params/eager-global-ordinals.asciidoc index 22dd13c8d75..49c5e5fce28 100644 --- a/docs/reference/mapping/params/eager-global-ordinals.asciidoc +++ b/docs/reference/mapping/params/eager-global-ordinals.asciidoc @@ -1,37 +1,52 @@ [[eager-global-ordinals]] === `eager_global_ordinals` -Global ordinals is a data-structure on top of doc values, that maintains an -incremental numbering for each unique term in a lexicographic order. Each -term has a unique number and the number of term 'A' is lower than the -number of term 'B'. Global ordinals are only supported with -<> and <> fields. In `keyword` fields, they -are available by default but `text` fields can only use them when `fielddata`, -with all of its associated baggage, is enabled. +==== What are global ordinals? -Doc values (and fielddata) also have ordinals, which is a unique numbering for -all terms in a particular segment and field. Global ordinals just build on top -of this, by providing a mapping between the segment ordinals and the global -ordinals, the latter being unique across the entire shard. 
Given that global -ordinals for a specific field are tied to _all the segments of a shard_, they -need to be entirely rebuilt whenever a once new segment becomes visible. +To support aggregations and other operations that require looking up field +values on a per-document basis, Elasticsearch uses a data structure called +<>. Term-based field types such as `keyword` store +their doc values using an ordinal mapping for a more compact representation. +This mapping works by assigning each term an incremental integer or 'ordinal' +based on its lexicographic order. The field's doc values store only the +ordinals for each document instead of the original terms, with a separate +lookup structure to convert between ordinals and terms. -Global ordinals are used for features that use segment ordinals, such as -the <>, -to improve the execution time. A terms aggregation relies purely on global -ordinals to perform the aggregation at the shard level, then converts global -ordinals to the real term only for the final reduce phase, which combines -results from different shards. +When used during aggregations, ordinals can greatly improve performance. As an +example, the `terms` aggregation relies only on ordinals to collect documents +into buckets at the shard-level, then converts the ordinals back to their +original term values when combining results across shards. -The loading time of global ordinals depends on the number of terms in a field, -but in general it is low, since it source field data has already been loaded. -The memory overhead of global ordinals is a small because it is very -efficiently compressed. +Each index segment defines its own ordinal mapping, but aggregations collect +data across an entire shard. So to be able to use ordinals for shard-level +operations like aggregations, Elasticsearch creates a unified mapping called +'global ordinals'. The global ordinal mapping is built on top of segment +ordinals, and works by maintaining a map from global ordinal to the local +ordinal for each segment. -By default, global ordinals are loaded at search-time, which is the right -trade-off if you are optimizing for indexing speed. However, if you are more -interested in search speed, it could be beneficial to set -`eager_global_ordinals: true` on fields that you plan to use in terms +Global ordinals are used if a search contains any of the following components: + +* Bucket aggregations on `keyword` and `flattened` fields. This includes +`terms` aggregations as mentioned above, as well as `composite`, `sampler`, +and `significant_terms`. +* Bucket aggregations on `text` fields that require <> +to be enabled. +* Operations on parent and child documents from a `join` field, including +`has_child` queries and `parent` aggregations. + +NOTE: The global ordinal mapping is an on-heap data structure. When measuring +memory usage, Elasticsearch counts the memory from global ordinals as +'fielddata'. Global ordinals memory is included in the +<>, and is returned +under `fielddata` in the <> response. + +==== Loading global ordinals + +The global ordinal mapping must be built before ordinals can be used during a +search. By default, the mapping is loaded during search on the first time that +global ordinals are needed. 
This is the right approach if you are optimizing
+for indexing speed, but if search performance is a priority, it's recommended
+to eagerly load global ordinals on fields that will be used in
aggregations:
[source,console]
------------
@@ -48,29 +63,14 @@ PUT my_index/_mapping
------------
// TEST[s/^/PUT my_index\n/]
-This will shift the cost of building the global ordinals from search-time to
-refresh-time. Elasticsearch will make sure that global ordinals are built
-before exposing to searches any changes to the content of the index.
-Elasticsearch will also eagerly build global ordinals when starting a new copy
-of a shard, such as when increasing the number of replicas or when relocating a
-shard onto a new node.
+When `eager_global_ordinals` is enabled, global ordinals are built when a shard
+is <> -- Elasticsearch always loads them before
+exposing changes to the content of the index. This shifts the cost of building
+global ordinals from search to index-time. Elasticsearch will also eagerly
+build global ordinals when creating a new copy of a shard, as can occur when
+increasing the number of replicas or relocating a shard onto a new node.
-If a shard has been <> down to a single
-segment then its global ordinals are identical to the ordinals for its unique
-segment, which means there is no extra cost for using global ordinals on such a
-shard. Note that for performance reasons you should only force-merge an index
-to which you will never write again.
-
-On a <>, global ordinals are discarded after each
-search and rebuilt again on the next search if needed or if
-`eager_global_ordinals` is set. This means `eager_global_ordinals` should not
-be used on frozen indices. Instead, force-merge an index to a single segment
-before freezing it so that global ordinals need not be built separately on each
-search.
-
-If you ever decide that you do not need to run `terms` aggregations on this
-field anymore, then you can disable eager loading of global ordinals at any
-time:
+Eager loading can be disabled at any time by updating the `eager_global_ordinals` setting:
[source,console]
------------
@@ -85,3 +85,33 @@ PUT my_index/_mapping
}
------------
// TEST[continued]
+
+IMPORTANT: On a <>, global ordinals are discarded
+after each search and rebuilt again when they're requested. This means that
+`eager_global_ordinals` should not be used on frozen indices: it would
+cause global ordinals to be reloaded on every search. Instead, the index should
+be force-merged to a single segment before being frozen. This avoids building
+global ordinals altogether (more details can be found in the next section).
+
+==== Avoiding global ordinal loading
+
+Usually, global ordinals do not present a large overhead in terms of their
+loading time and memory usage. However, loading global ordinals can be
+expensive on indices with large shards, or if the fields contain a large
+number of unique term values. Because global ordinals provide a unified mapping
+for all segments on the shard, they also need to be rebuilt entirely when a new
+segment becomes visible.
+
+In some cases it is possible to avoid global ordinal loading altogether:
+
+* The `terms`, `sampler`, and `significant_terms` aggregations support a
+parameter
+<>
+that helps control how buckets are collected. It defaults to `global_ordinals`,
+but can be set to `map` to instead use the term values directly.
+* If a shard has been <> down to a single
+segment, then its segment ordinals are already 'global' to the shard.
In this
+case, Elasticsearch does not need to build a global ordinal mapping and there
+is no additional overhead from using global ordinals. Note that for performance
+reasons you should only force-merge an index to which you will never write
+again.
diff --git a/docs/reference/mapping/params/fielddata.asciidoc b/docs/reference/mapping/params/fielddata.asciidoc
index df4e3756606..61be6500210 100644
--- a/docs/reference/mapping/params/fielddata.asciidoc
+++ b/docs/reference/mapping/params/fielddata.asciidoc
@@ -31,12 +31,10 @@ why fielddata is disabled by default.
If you try to sort, aggregate, or access values from a script on a `text`
field, you will see this exception:
-[quote]
---
+[literal]
Fielddata is disabled on text fields by default. Set `fielddata=true` on
[`your_field_name`] in order to load fielddata in memory by uninverting the
inverted index. Note that this can however use significant memory.
---
[[before-enabling-fielddata]]
==== Before enabling fielddata
diff --git a/docs/reference/migration/migrate_7_0/discovery.asciidoc b/docs/reference/migration/migrate_7_0/discovery.asciidoc
index 3e0b82f5997..f182a0c2d03 100644
--- a/docs/reference/migration/migrate_7_0/discovery.asciidoc
+++ b/docs/reference/migration/migrate_7_0/discovery.asciidoc
@@ -70,3 +70,17 @@ pings, each of which times out after 10 seconds. Thus a node that is
unresponsive for longer than 30 seconds is liable to be removed from the
cluster. Previously the default timeout for each ping was 30 seconds, so that
an unresponsive node might be kept in the cluster for over 90 seconds.
+
+[float]
+==== Master-ineligible nodes are ignored by discovery
+
+In earlier versions it was possible to use master-ineligible nodes during the
+discovery process, either as seed nodes or to transfer discovery gossip
+indirectly between the master-eligible nodes. Clusters that relied on
+master-ineligible nodes like this were fragile and unable to automatically
+recover from some kinds of failure. Discovery now involves only the
+master-eligible nodes in the cluster so that it is not possible to rely on
+master-ineligible nodes like this. You should configure
+<> to provide the addresses of all the master-eligible nodes in
+your cluster.
diff --git a/docs/reference/migration/migrate_7_0/search.asciidoc b/docs/reference/migration/migrate_7_0/search.asciidoc
index 95aaab91fd8..a8daee32efb 100644
--- a/docs/reference/migration/migrate_7_0/search.asciidoc
+++ b/docs/reference/migration/migrate_7_0/search.asciidoc
@@ -65,7 +65,7 @@ Adaptive replica selection has been enabled by default. If you wish to return
to the older round robin of search requests, you can use the
`cluster.routing.use_adaptive_replica_selection` setting:
-[source,js]
+[source,console]
--------------------------------------------------
PUT /_cluster/settings
{
@@ -74,7 +74,7 @@ PUT /_cluster/settings
}
}
--------------------------------------------------
-// CONSOLE
+
[float]
[[search-api-returns-400-invalid-requests]]
diff --git a/docs/reference/migration/migrate_7_4.asciidoc b/docs/reference/migration/migrate_7_4.asciidoc
index f951c7d0556..cc54dd2fb7c 100644
--- a/docs/reference/migration/migrate_7_4.asciidoc
+++ b/docs/reference/migration/migrate_7_4.asciidoc
@@ -39,6 +39,12 @@ If a document doesn't have a value for a vector field (dense_vector or
sparse_vector) on which a vector function is executed, an error will be thrown.
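One way to sidestep the missing-value error, sketched here with hypothetical index and field names, is to wrap the vector function in a `script_score` query whose inner query only matches documents that actually contain the vector field:

[source,console]
--------------------------------------------------
GET my_index/_search
{
  "query": {
    "script_score": {
      "query": {
        "exists": { "field": "my_dense_vector" }
      },
      "script": {
        "source": "cosineSimilarity(params.query_vector, doc['my_dense_vector']) + 1.0",
        "params": {
          "query_vector": [0.5, 10.0, 6.0]
        }
      }
    }
  }
}
--------------------------------------------------
// NOTCONSOLE

Adding `1.0` keeps the script score non-negative, since `cosineSimilarity` can return values between `-1` and `1`.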
+[discrete]
+==== Use float instead of double for query vectors
+Previously, vector functions like `cosineSimilarity` represented the query
+vector as a list of doubles. Now vector functions use floats, which matches
+how the stored document vectors are represented.
+
[discrete]
[[breaking_74_snapshots_changes]]
=== Snapshot and Restore changes
diff --git a/docs/reference/ml/anomaly-detection/apis/stop-datafeed.asciidoc b/docs/reference/ml/anomaly-detection/apis/stop-datafeed.asciidoc
index a18894d7867..8094a8a8cb5 100644
--- a/docs/reference/ml/anomaly-detection/apis/stop-datafeed.asciidoc
+++ b/docs/reference/ml/anomaly-detection/apis/stop-datafeed.asciidoc
@@ -82,14 +82,13 @@ are no matches or only partial matches.
The following example stops the `datafeed-total-requests` {dfeed}:
-[source,js]
+[source,console]
--------------------------------------------------
POST _ml/datafeeds/datafeed-total-requests/_stop
{
"timeout": "30s"
}
--------------------------------------------------
-// CONSOLE
// TEST[skip:setup:server_metrics_startdf]
When the {dfeed} stops, you receive the following results:
diff --git a/docs/reference/ml/anomaly-detection/apis/update-datafeed.asciidoc b/docs/reference/ml/anomaly-detection/apis/update-datafeed.asciidoc
index eb9e0d7dcee..dd59da4af7c 100644
--- a/docs/reference/ml/anomaly-detection/apis/update-datafeed.asciidoc
+++ b/docs/reference/ml/anomaly-detection/apis/update-datafeed.asciidoc
@@ -102,7 +102,7 @@ see <>.
The following example updates the query for the `datafeed-total-requests`
{dfeed} so that only log entries of error level are analyzed:
-[source,js]
+[source,console]
--------------------------------------------------
POST _ml/datafeeds/datafeed-total-requests/_update
{
@@ -113,7 +113,6 @@ POST _ml/datafeeds/datafeed-total-requests/_update
}
}
--------------------------------------------------
-// CONSOLE
// TEST[skip:setup:server_metrics_datafeed]
When the {dfeed} is updated, you receive the full {dfeed} configuration with
diff --git a/docs/reference/ml/anomaly-detection/apis/update-filter.asciidoc b/docs/reference/ml/anomaly-detection/apis/update-filter.asciidoc
index d7fe0122e90..be939230971 100644
--- a/docs/reference/ml/anomaly-detection/apis/update-filter.asciidoc
+++ b/docs/reference/ml/anomaly-detection/apis/update-filter.asciidoc
@@ -44,7 +44,7 @@ Updates the description of a filter, adds items, or removes items.
You can change the description, add and remove items to the `safe_domains`
filter as follows:
-[source,js]
+[source,console]
--------------------------------------------------
POST _ml/filters/safe_domains/_update
{
@@ -53,7 +53,6 @@ POST _ml/filters/safe_domains/_update
"remove_items": ["wikipedia.org"]
}
--------------------------------------------------
-// CONSOLE
// TEST[skip:setup:ml_filter_safe_domains]
The API returns the following results:
diff --git a/docs/reference/ml/anomaly-detection/apis/update-job.asciidoc b/docs/reference/ml/anomaly-detection/apis/update-job.asciidoc
index e35ac438638..cb77d9e2733 100644
--- a/docs/reference/ml/anomaly-detection/apis/update-job.asciidoc
+++ b/docs/reference/ml/anomaly-detection/apis/update-job.asciidoc
@@ -101,7 +101,7 @@ No other detector property can be updated.
The following example updates the `total-requests` job: -[source,js] +[source,console] -------------------------------------------------- POST _ml/anomaly_detectors/total-requests/_update { @@ -125,7 +125,6 @@ POST _ml/anomaly_detectors/total-requests/_update } } -------------------------------------------------- -// CONSOLE // TEST[skip:setup:server_metrics_job] When the {anomaly-job} is updated, you receive a summary of the job diff --git a/docs/reference/ml/anomaly-detection/apis/update-snapshot.asciidoc b/docs/reference/ml/anomaly-detection/apis/update-snapshot.asciidoc index 90438439de0..beda52bf140 100644 --- a/docs/reference/ml/anomaly-detection/apis/update-snapshot.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/update-snapshot.asciidoc @@ -50,7 +50,7 @@ The following properties can be updated after the model snapshot is created: The following example updates the snapshot identified as `1491852978`: -[source,js] +[source,console] -------------------------------------------------- POST _ml/anomaly_detectors/it_ops_new_logs/model_snapshots/1491852978/_update @@ -59,7 +59,6 @@ _ml/anomaly_detectors/it_ops_new_logs/model_snapshots/1491852978/_update "retain": true } -------------------------------------------------- -// CONSOLE // TEST[skip:todo] When the snapshot is updated, you receive the following results: diff --git a/docs/reference/ml/anomaly-detection/apis/validate-detector.asciidoc b/docs/reference/ml/anomaly-detection/apis/validate-detector.asciidoc index e4116d04f91..daf8d7d4c69 100644 --- a/docs/reference/ml/anomaly-detection/apis/validate-detector.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/validate-detector.asciidoc @@ -37,7 +37,7 @@ see <>. The following example validates detector configuration information: -[source,js] +[source,console] -------------------------------------------------- POST _ml/anomaly_detectors/_validate/detector { @@ -46,7 +46,6 @@ POST _ml/anomaly_detectors/_validate/detector "by_field_name": "airline" } -------------------------------------------------- -// CONSOLE // TEST[skip:needs-licence] When the validation completes, you receive the following results: diff --git a/docs/reference/ml/anomaly-detection/apis/validate-job.asciidoc b/docs/reference/ml/anomaly-detection/apis/validate-job.asciidoc index 2517205ad85..8b753b5d58c 100644 --- a/docs/reference/ml/anomaly-detection/apis/validate-job.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/validate-job.asciidoc @@ -37,7 +37,7 @@ see <>. The following example validates job configuration information: -[source,js] +[source,console] -------------------------------------------------- POST _ml/anomaly_detectors/_validate { @@ -57,7 +57,6 @@ POST _ml/anomaly_detectors/_validate } } -------------------------------------------------- -// CONSOLE // TEST[skip:needs-licence] When the validation is complete, you receive the following results: diff --git a/docs/reference/ml/anomaly-detection/detector-custom-rules.asciidoc b/docs/reference/ml/anomaly-detection/detector-custom-rules.asciidoc index fc00d11cea6..8cba67da926 100644 --- a/docs/reference/ml/anomaly-detection/detector-custom-rules.asciidoc +++ b/docs/reference/ml/anomaly-detection/detector-custom-rules.asciidoc @@ -31,7 +31,7 @@ _filters_ in {ml}. Filters can be shared across {anomaly-jobs}. 
We create our filter using the {ref}/ml-put-filter.html[put filter API]: -[source,js] +[source,console] ---------------------------------- PUT _ml/filters/safe_domains { @@ -39,13 +39,12 @@ PUT _ml/filters/safe_domains "items": ["safe.com", "trusted.com"] } ---------------------------------- -// CONSOLE // TEST[skip:needs-licence] Now, we can create our {anomaly-job} specifying a scope that uses the `safe_domains` filter for the `highest_registered_domain` field: -[source,js] +[source,console] ---------------------------------- PUT _ml/anomaly_detectors/dns_exfiltration_with_rule { @@ -71,21 +70,19 @@ PUT _ml/anomaly_detectors/dns_exfiltration_with_rule } } ---------------------------------- -// CONSOLE // TEST[skip:needs-licence] As time advances and we see more data and more results, we might encounter new domains that we want to add in the filter. We can do that by using the {ref}/ml-update-filter.html[update filter API]: -[source,js] +[source,console] ---------------------------------- POST _ml/filters/safe_domains/_update { "add_items": ["another-safe.com"] } ---------------------------------- -// CONSOLE // TEST[skip:setup:ml_filter_safe_domains] Note that we can use any of the `partition_field_name`, `over_field_name`, or @@ -93,7 +90,7 @@ Note that we can use any of the `partition_field_name`, `over_field_name`, or In the following example we scope multiple fields: -[source,js] +[source,console] ---------------------------------- PUT _ml/anomaly_detectors/scoping_multiple_fields { @@ -125,7 +122,6 @@ PUT _ml/anomaly_detectors/scoping_multiple_fields } } ---------------------------------- -// CONSOLE // TEST[skip:needs-licence] Such a detector will skip results when the values of all 3 scoped fields @@ -143,7 +139,7 @@ investigation. Let us now configure an {anomaly-job} with a rule that will skip results where CPU utilization is less than 0.20. -[source,js] +[source,console] ---------------------------------- PUT _ml/anomaly_detectors/cpu_with_rule { @@ -169,7 +165,6 @@ PUT _ml/anomaly_detectors/cpu_with_rule } } ---------------------------------- -// CONSOLE // TEST[skip:needs-licence] When there are multiple conditions they are combined with a logical `and`. @@ -179,7 +174,7 @@ a rule with two conditions, one for each end of the desired range. Here is an example where a count detector will skip results when the count is greater than 30 and less than 50: -[source,js] +[source,console] ---------------------------------- PUT _ml/anomaly_detectors/rule_with_range { @@ -209,7 +204,6 @@ PUT _ml/anomaly_detectors/rule_with_range } } ---------------------------------- -// CONSOLE // TEST[skip:needs-licence] ==== Custom rules in the life-cycle of a job diff --git a/docs/reference/ml/anomaly-detection/functions/count.asciidoc b/docs/reference/ml/anomaly-detection/functions/count.asciidoc index 02a9cd2b08e..fe81fc5f596 100644 --- a/docs/reference/ml/anomaly-detection/functions/count.asciidoc +++ b/docs/reference/ml/anomaly-detection/functions/count.asciidoc @@ -43,7 +43,7 @@ For more information about those properties, see {ref}/ml-job-resource.html#ml-detectorconfig[Detector configuration objects]. .Example 1: Analyzing events with the count function -[source,js] +[source,console] -------------------------------------------------- PUT _ml/anomaly_detectors/example1 { @@ -58,7 +58,6 @@ PUT _ml/anomaly_detectors/example1 } } -------------------------------------------------- -// CONSOLE // TEST[skip:needs-licence] This example is probably the simplest possible analysis. 
It identifies @@ -70,7 +69,7 @@ event rate and detects when the event rate is unusual compared to its past behavior. .Example 2: Analyzing errors with the high_count function -[source,js] +[source,console] -------------------------------------------------- PUT _ml/anomaly_detectors/example2 { @@ -87,7 +86,6 @@ PUT _ml/anomaly_detectors/example2 } } -------------------------------------------------- -// CONSOLE // TEST[skip:needs-licence] If you use this `high_count` function in a detector in your {anomaly-job}, it @@ -96,7 +94,7 @@ unusually high count of error codes compared to other users. .Example 3: Analyzing status codes with the low_count function -[source,js] +[source,console] -------------------------------------------------- PUT _ml/anomaly_detectors/example3 { @@ -112,7 +110,6 @@ PUT _ml/anomaly_detectors/example3 } } -------------------------------------------------- -// CONSOLE // TEST[skip:needs-licence] In this example, the function detects when the count of events for a @@ -123,7 +120,7 @@ event rate for each status code and detects when a status code has an unusually low count compared to its past behavior. .Example 4: Analyzing aggregated data with the count function -[source,js] +[source,console] -------------------------------------------------- PUT _ml/anomaly_detectors/example4 { @@ -139,7 +136,6 @@ PUT _ml/anomaly_detectors/example4 } } -------------------------------------------------- -// CONSOLE // TEST[skip:needs-licence] If you are analyzing an aggregated `events_per_min` field, do not use a sum @@ -188,7 +184,7 @@ The `non_zero_count` function models only the following data: ======================================== .Example 5: Analyzing signatures with the high_non_zero_count function -[source,js] +[source,console] -------------------------------------------------- PUT _ml/anomaly_detectors/example5 { @@ -204,7 +200,6 @@ PUT _ml/anomaly_detectors/example5 } } -------------------------------------------------- -// CONSOLE // TEST[skip:needs-licence] If you use this `high_non_zero_count` function in a detector in your @@ -242,7 +237,7 @@ For more information about those properties, see {ref}/ml-job-resource.html#ml-detectorconfig[Detector configuration objects]. .Example 6: Analyzing users with the distinct_count function -[source,js] +[source,console] -------------------------------------------------- PUT _ml/anomaly_detectors/example6 { @@ -258,7 +253,6 @@ PUT _ml/anomaly_detectors/example6 } } -------------------------------------------------- -// CONSOLE // TEST[skip:needs-licence] This `distinct_count` function detects when a system has an unusual number @@ -267,7 +261,7 @@ of logged in users. When you use this function in a detector in your distinct number of users is unusual compared to the past. .Example 7: Analyzing ports with the high_distinct_count function -[source,js] +[source,console] -------------------------------------------------- PUT _ml/anomaly_detectors/example7 { @@ -284,7 +278,6 @@ PUT _ml/anomaly_detectors/example7 } } -------------------------------------------------- -// CONSOLE // TEST[skip:needs-licence] This example detects instances of port scanning. 
When you use this function in a diff --git a/docs/reference/ml/anomaly-detection/functions/geo.asciidoc b/docs/reference/ml/anomaly-detection/functions/geo.asciidoc index 7469bb963d4..20b8e6816ef 100644 --- a/docs/reference/ml/anomaly-detection/functions/geo.asciidoc +++ b/docs/reference/ml/anomaly-detection/functions/geo.asciidoc @@ -29,7 +29,7 @@ For more information about those properties, see {ref}/ml-job-resource.html#ml-detectorconfig[Detector configuration objects]. .Example 1: Analyzing transactions with the lat_long function -[source,js] +[source,console] -------------------------------------------------- PUT _ml/anomaly_detectors/example1 { @@ -46,7 +46,6 @@ PUT _ml/anomaly_detectors/example1 } } -------------------------------------------------- -// CONSOLE // TEST[skip:needs-licence] If you use this `lat_long` function in a detector in your {anomaly-job}, it diff --git a/docs/reference/ml/anomaly-detection/stopping-ml.asciidoc b/docs/reference/ml/anomaly-detection/stopping-ml.asciidoc index 454d8497634..e91776e287a 100644 --- a/docs/reference/ml/anomaly-detection/stopping-ml.asciidoc +++ b/docs/reference/ml/anomaly-detection/stopping-ml.asciidoc @@ -23,11 +23,10 @@ When you stop a {dfeed}, it ceases to retrieve data from {es}. You can stop a {ref}/ml-stop-datafeed.html[stop {dfeeds} API]. For example, the following request stops the `feed1` {dfeed}: -[source,js] +[source,console] -------------------------------------------------- POST _ml/datafeeds/feed1/_stop -------------------------------------------------- -// CONSOLE // TEST[skip:setup:server_metrics_startdf] NOTE: You must have `manage_ml`, or `manage` cluster privileges to stop {dfeeds}. @@ -44,11 +43,10 @@ A {dfeed} can be started and stopped multiple times throughout its lifecycle. If you are upgrading your cluster, you can use the following request to stop all {dfeeds}: -[source,js] +[source,console] ---------------------------------- POST _ml/datafeeds/_all/_stop ---------------------------------- -// CONSOLE // TEST[skip:needs-licence] [float] @@ -64,11 +62,10 @@ You can close a job by using the {ref}/ml-close-job.html[close {anomaly-job} API]. For example, the following request closes the `job1` job: -[source,js] +[source,console] -------------------------------------------------- POST _ml/anomaly_detectors/job1/_close -------------------------------------------------- -// CONSOLE // TEST[skip:setup:server_metrics_openjob] NOTE: You must have `manage_ml`, or `manage` cluster privileges to stop {dfeeds}. @@ -84,9 +81,8 @@ lifecycle. If you are upgrading your cluster, you can use the following request to close all open {anomaly-jobs} on the cluster: -[source,js] +[source,console] ---------------------------------- POST _ml/anomaly_detectors/_all/_close ---------------------------------- -// CONSOLE // TEST[skip:needs-licence] diff --git a/docs/reference/ml/anomaly-detection/transforms.asciidoc b/docs/reference/ml/anomaly-detection/transforms.asciidoc index 3758c24c22a..6cda51caaa5 100644 --- a/docs/reference/ml/anomaly-detection/transforms.asciidoc +++ b/docs/reference/ml/anomaly-detection/transforms.asciidoc @@ -24,7 +24,7 @@ functions in one or more detectors. 
The following index APIs create and add content to an index that is used in subsequent examples: -[source,js] +[source,console] ---------------------------------- PUT /my_index { @@ -92,8 +92,8 @@ PUT /my_index/_doc/1 } } ---------------------------------- -// CONSOLE // TEST[skip:SETUP] + <1> In this example, string fields are mapped as `keyword` fields to support aggregation. If you want both a full text (`text`) and a keyword (`keyword`) version of the same field, use multi-fields. For more information, see @@ -101,7 +101,7 @@ version of the same field, use multi-fields. For more information, see [[ml-configuring-transform1]] .Example 1: Adding two numerical fields -[source,js] +[source,console] ---------------------------------- PUT _ml/anomaly_detectors/test1 { @@ -140,8 +140,8 @@ PUT _ml/datafeeds/datafeed-test1 } } ---------------------------------- -// CONSOLE // TEST[skip:needs-licence] + <1> A script field named `total_error_count` is referenced in the detector within the job. <2> The script field is defined in the {dfeed}. @@ -157,11 +157,10 @@ For more information, see You can preview the contents of the {dfeed} by using the following API: -[source,js] +[source,console] ---------------------------------- GET _ml/datafeeds/datafeed-test1/_preview ---------------------------------- -// CONSOLE // TEST[skip:continued] In this example, the API returns the following results, which contain a sum of @@ -210,7 +209,7 @@ that convert your strings to upper or lowercase letters. [[ml-configuring-transform2]] .Example 2: Concatenating strings -[source,js] +[source,console] -------------------------------------------------- PUT _ml/anomaly_detectors/test2 { @@ -251,8 +250,8 @@ PUT _ml/datafeeds/datafeed-test2 GET _ml/datafeeds/datafeed-test2/_preview -------------------------------------------------- -// CONSOLE // TEST[skip:needs-licence] + <1> The script field has a rather generic name in this case, since it will be used for various tests in the subsequent examples. <2> The script field uses the plus (+) operator to concatenate strings. @@ -272,7 +271,7 @@ and "SMITH " have been concatenated and an underscore was added: [[ml-configuring-transform3]] .Example 3: Trimming strings -[source,js] +[source,console] -------------------------------------------------- POST _ml/datafeeds/datafeed-test2/_update { @@ -288,8 +287,8 @@ POST _ml/datafeeds/datafeed-test2/_update GET _ml/datafeeds/datafeed-test2/_preview -------------------------------------------------- -// CONSOLE // TEST[skip:continued] + <1> This script field uses the `trim()` function to trim extra white space from a string. @@ -308,7 +307,7 @@ has been trimmed to "SMITH": [[ml-configuring-transform4]] .Example 4: Converting strings to lowercase -[source,js] +[source,console] -------------------------------------------------- POST _ml/datafeeds/datafeed-test2/_update { @@ -324,8 +323,8 @@ POST _ml/datafeeds/datafeed-test2/_update GET _ml/datafeeds/datafeed-test2/_preview -------------------------------------------------- -// CONSOLE // TEST[skip:continued] + <1> This script field uses the `toLowerCase` function to convert a string to all lowercase letters. Likewise, you can use the `toUpperCase{}` function to convert a string to uppercase letters. 
@@ -345,7 +344,7 @@ has been converted to "joe": [[ml-configuring-transform5]] .Example 5: Converting strings to mixed case formats -[source,js] +[source,console] -------------------------------------------------- POST _ml/datafeeds/datafeed-test2/_update { @@ -361,8 +360,8 @@ POST _ml/datafeeds/datafeed-test2/_update GET _ml/datafeeds/datafeed-test2/_preview -------------------------------------------------- -// CONSOLE // TEST[skip:continued] + <1> This script field is a more complicated example of case manipulation. It uses the `subString()` function to capitalize the first letter of a string and converts the remaining characters to lowercase. @@ -382,7 +381,7 @@ has been converted to "Joe": [[ml-configuring-transform6]] .Example 6: Replacing tokens -[source,js] +[source,console] -------------------------------------------------- POST _ml/datafeeds/datafeed-test2/_update { @@ -398,8 +397,8 @@ POST _ml/datafeeds/datafeed-test2/_update GET _ml/datafeeds/datafeed-test2/_preview -------------------------------------------------- -// CONSOLE // TEST[skip:continued] + <1> This script field uses regular expressions to replace white space with underscores. @@ -418,7 +417,7 @@ The preview {dfeed} API returns the following results, which show that [[ml-configuring-transform7]] .Example 7: Regular expression matching and concatenation -[source,js] +[source,console] -------------------------------------------------- POST _ml/datafeeds/datafeed-test2/_update { @@ -434,8 +433,8 @@ POST _ml/datafeeds/datafeed-test2/_update GET _ml/datafeeds/datafeed-test2/_preview -------------------------------------------------- -// CONSOLE // TEST[skip:continued] + <1> This script field looks for a specific regular expression pattern and emits the matched groups as a concatenated string. If no match is found, it emits an empty string. 
@@ -455,7 +454,7 @@ The preview {dfeed} API returns the following results, which show that [[ml-configuring-transform8]] .Example 8: Splitting strings by domain name -[source,js] +[source,console] -------------------------------------------------- PUT _ml/anomaly_detectors/test3 { @@ -499,7 +498,6 @@ PUT _ml/datafeeds/datafeed-test3 GET _ml/datafeeds/datafeed-test3/_preview -------------------------------------------------- -// CONSOLE // TEST[skip:needs-licence] If you have a single field that contains a well-formed DNS domain name, you can @@ -527,7 +525,7 @@ The preview {dfeed} API returns the following results, which show that [[ml-configuring-transform9]] .Example 9: Transforming geo_point data -[source,js] +[source,console] -------------------------------------------------- PUT _ml/anomaly_detectors/test4 { @@ -567,7 +565,6 @@ PUT _ml/datafeeds/datafeed-test4 GET _ml/datafeeds/datafeed-test4/_preview -------------------------------------------------- -// CONSOLE // TEST[skip:needs-licence] In {es}, location data can be stored in `geo_point` fields but this data type is diff --git a/docs/reference/ml/df-analytics/apis/delete-dfanalytics.asciidoc b/docs/reference/ml/df-analytics/apis/delete-dfanalytics.asciidoc index d848271eea0..5f20b034152 100644 --- a/docs/reference/ml/df-analytics/apis/delete-dfanalytics.asciidoc +++ b/docs/reference/ml/df-analytics/apis/delete-dfanalytics.asciidoc @@ -34,11 +34,10 @@ information, see {stack-ov}/security-privileges.html[Security privileges] and The following example deletes the `loganalytics` {dfanalytics-job}: -[source,js] +[source,console] -------------------------------------------------- DELETE _ml/data_frame/analytics/loganalytics -------------------------------------------------- -// CONSOLE // TEST[skip:TBD] The API returns the following result: diff --git a/docs/reference/ml/df-analytics/apis/dfanalyticsresources.asciidoc b/docs/reference/ml/df-analytics/apis/dfanalyticsresources.asciidoc index 7ba480eb3d2..2b666a54022 100644 --- a/docs/reference/ml/df-analytics/apis/dfanalyticsresources.asciidoc +++ b/docs/reference/ml/df-analytics/apis/dfanalyticsresources.asciidoc @@ -12,7 +12,8 @@ `analysis`:: (object) The type of analysis that is performed on the `source`. For example: - `outlier_detection`. For more information, see <>. + `outlier_detection` or `regression`. For more information, see + <>. `analyzed_fields`:: (object) You can specify both `includes` and/or `excludes` patterns. If @@ -28,7 +29,7 @@ from the analysis. -[source,js] +[source,console] -------------------------------------------------- PUT _ml/data_frame/analytics/loganalytics { @@ -48,7 +49,6 @@ PUT _ml/data_frame/analytics/loganalytics } } -------------------------------------------------- -// CONSOLE // TEST[setup:setup_logdata] `description`:: @@ -99,20 +99,21 @@ PUT _ml/data_frame/analytics/loganalytics ==== Analysis objects {dfanalytics-cap} resources contain `analysis` objects. For example, when you -create a {dfanalytics-job}, you must define the type of analysis it performs. -Currently, `outlier_detection` is the only available type of analysis, however, -other types will be added, for example `regression`. - +create a {dfanalytics-job}, you must define the type of analysis it performs. 
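For orientation, a minimal sketch of the two `analysis` object shapes follows; the `price` field name is purely illustrative and the fragment is not a runnable request on its own:

[source,js]
--------------------------------------------------
"analysis": {
  "outlier_detection": {}
}

"analysis": {
  "regression": {
    "dependent_variable": "price"
  }
}
--------------------------------------------------
// NOTCONSOLE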
+ [discrete] [[oldetection-resources]] ==== {oldetection-cap} configuration objects -An {oldetection} configuration object has the following properties: +An `outlier_detection` configuration object has the following properties: -`n_neighbors`:: - (integer) Defines the value for how many nearest neighbors each method of - {oldetection} will use to calculate its {olscore}. When the value is - not set, the system will dynamically detect an appropriate value. +`compute_feature_influence`:: + (boolean) If `true`, the feature influence calculation is enabled. Defaults to + `true`. + +`feature_influence_threshold`:: + (double) The minimum {olscore} that a document needs to have in order to + calculate its {fiscore}. Value range: 0-1 (`0.1` by default). `method`:: (string) Sets the method that {oldetection} uses. If the method is not set @@ -120,8 +121,140 @@ An {oldetection} configuration object has the following properties: combines their individual {olscores} to obtain the overall {olscore}. We recommend to use the ensemble method. Available methods are `lof`, `ldof`, `distance_kth_nn`, `distance_knn`. + + `n_neighbors`:: + (integer) Defines the value for how many nearest neighbors each method of + {oldetection} will use to calculate its {olscore}. When the value is not set, + different values will be used for different ensemble members. This helps + improve diversity in the ensemble. Therefore, only override this if you are + confident that the value you choose is appropriate for the data set. + +`outlier_fraction`:: + (double) Sets the proportion of the data set that is assumed to be outlying prior to + {oldetection}. For example, 0.05 means it is assumed that 5% of values are real outliers + and 95% are inliers. + +`standardize_columns`:: + (boolean) If `true`, then the following operation is performed on the columns + before computing outlier scores: (x_i - mean(x_i)) / sd(x_i). Defaults to + `true`. For more information, see + https://en.wikipedia.org/wiki/Feature_scaling#Standardization_(Z-score_Normalization)[this wiki page about standardization]. -`feature_influence_threshold`:: - (double) The minimum {olscore} that a document needs to have in order to - calculate its {fiscore}. - Value range: 0-1 (`0.1` by default). \ No newline at end of file + +[discrete] +[[regression-resources]] +==== {regression-cap} configuration objects + +[source,console] +-------------------------------------------------- +PUT _ml/data_frame/analytics/house_price_regression_analysis +{ + "source": { + "index": "houses_sold_last_10_yrs" <1> + }, + "dest": { + "index": "house_price_predictions" <2> + }, + "analysis": + { + "regression": { <3> + "dependent_variable": "price" <4> + } + } +} +-------------------------------------------------- +// TEST[skip:TBD] + +<1> Training data is taken from source index `houses_sold_last_10_yrs`. +<2> Analysis results will be output to destination index +`house_price_predictions`. +<3> The regression analysis configuration object. +<4> Regression analysis will use field `price` to train on. As no other +parameters have been specified it will train on 100% of eligible data, store its +prediction in destination index field `price_prediction` and use in-built +hyperparameter optimization to give minimum validation errors. + + +[float] +[[regression-resources-standard]] +===== Standard parameters + +`dependent_variable`:: + (Required, string) Defines which field of the {dataframe} is to be predicted. 
+ This parameter is supplied by field name and must match one of the fields in
+ the index being used to train. If this field is missing from a document, then
+ that document will not be used for training, but a prediction with the trained
+ model will be generated for it. The data type of the field must be numeric. It
+ is also known as the continuous target variable.
+
+`prediction_field_name`::
+ (Optional, string) Defines the name of the prediction field in the results.
+ Defaults to `_prediction`.
+
+`training_percent`::
+ (Optional, integer) Defines what percentage of the eligible documents that will
+ be used for training. Documents that are ignored by the analysis (for example
+ those that contain arrays) won’t be included in the calculation for used
+ percentage. Defaults to `100`.
+
+
+[float]
+[[regression-resources-advanced]]
+===== Advanced parameters
+
+Advanced parameters are for fine-tuning {reganalysis}. They are set
+automatically by <>
+to give minimum validation error. It is highly recommended to use the default
+values unless you fully understand the function of these parameters. If these
+parameters are not supplied, their values are automatically tuned to give
+minimum validation error.
+
+`eta`::
+ (Optional, double) The shrinkage applied to the weights. Smaller values result
+ in larger forests which have better generalization error. However, the smaller
+ the value the longer the training will take. For more information, see
+ https://en.wikipedia.org/wiki/Gradient_boosting#Shrinkage[this wiki article]
+ about shrinkage.
+
+`feature_bag_fraction`::
+ (Optional, double) Defines the fraction of features that will be used when
+ selecting a random bag for each candidate split.
+
+`maximum_number_trees`::
+ (Optional, integer) Defines the maximum number of trees the forest is allowed
+ to contain. The maximum value is 2000.
+
+`gamma`::
+ (Optional, double) Regularization parameter to prevent overfitting on the
+ training dataset. Multiplies a linear penalty associated with the size of
+ individual trees in the forest. The higher the value the more training will
+ prefer smaller trees. The smaller this parameter the larger individual trees
+ will be and the longer training will take.
+
+`lambda`::
+ (Optional, double) Regularization parameter to prevent overfitting on the
+ training dataset. Multiplies an L2 regularisation term which applies to leaf
+ weights of the individual trees in the forest. The higher the value the more
+ training will attempt to keep leaf weights small. This makes the prediction
+ function smoother at the expense of potentially not being able to capture
+ relevant relationships between the features and the {depvar}. The smaller this
+ parameter the larger individual trees will be and the longer training will take.
+
+
+[[ml-hyperparameter-optimization]]
+===== Hyperparameter optimization
+
+If you don't supply {regression} parameters, hyperparameter optimization will be
+performed by default to set a value for the undefined parameters. The starting
+point is calculated for data dependent parameters by examining the loss on the
+training data. Subject to the size constraint, this operation provides an upper
+bound on the improvement in validation loss.
+
+A fixed number of rounds is used for optimization which depends on the number of
+parameters being optimized. The optimization starts with random search, then
+Bayesian optimisation is performed targeting maximum expected
+improvement.
If you override any parameters, then the optimization will
+calculate the value of the remaining parameters accordingly and use the value
+you provided for the overridden parameter. The number of rounds is reduced
+accordingly. The validation error is estimated in each round by using 4-fold
+cross validation.
\ No newline at end of file
diff --git a/docs/reference/ml/df-analytics/apis/estimate-memory-usage-dfanalytics.asciidoc b/docs/reference/ml/df-analytics/apis/estimate-memory-usage-dfanalytics.asciidoc
index f91afb2196f..d91fdcaffbc 100644
--- a/docs/reference/ml/df-analytics/apis/estimate-memory-usage-dfanalytics.asciidoc
+++ b/docs/reference/ml/df-analytics/apis/estimate-memory-usage-dfanalytics.asciidoc
@@ -54,7 +54,7 @@ Serves as an advice on how to set `model_memory_limit` when creating {dfanalytic
[[ml-estimate-memory-usage-dfanalytics-example]]
==== {api-examples-title}
-[source,js]
+[source,console]
--------------------------------------------------
POST _ml/data_frame/analytics/_estimate_memory_usage
{
@@ -68,7 +68,6 @@ POST _ml/data_frame/analytics/_estimate_memory_usage
}
}
--------------------------------------------------
-// CONSOLE
// TEST[skip:TBD]
The API returns the following results:
diff --git a/docs/reference/ml/df-analytics/apis/evaluate-dfanalytics.asciidoc b/docs/reference/ml/df-analytics/apis/evaluate-dfanalytics.asciidoc
index e4d02c3a892..3c855b18289 100644
--- a/docs/reference/ml/df-analytics/apis/evaluate-dfanalytics.asciidoc
+++ b/docs/reference/ml/df-analytics/apis/evaluate-dfanalytics.asciidoc
@@ -27,15 +27,11 @@ information, see {stack-ov}/security-privileges.html[Security privileges] and
[[ml-evaluate-dfanalytics-desc]]
==== {api-description-title}
-This API evaluates the executed analysis on an index that is already annotated
-with a field that contains the results of the analytics (the `ground truth`)
-for each {dataframe} row.
+The API packages together commonly used evaluation metrics for various types of
+machine learning features. This has been designed for use on indexes created by
+{dfanalytics}. Evaluation requires both a ground truth field and an analytics
+result field to be present.
-Evaluation is typically done by calculating a set of metrics that capture various aspects of the quality of the results over the data for which you have the
-`ground truth`.
-
-For different types of analyses different metrics are suitable. This API
-packages together commonly used metrics for various analyses.
[[ml-evaluate-dfanalytics-request-body]]
==== {api-request-body-title}
@@ -45,15 +41,20 @@ packages together commonly used metrics for various analyses.
performed.
`query`::
- (Optional, object) Query used to select data from the index.
- The {es} query domain-specific language (DSL). This value corresponds to the query
- object in an {es} search POST body. By default, this property has the following
- value: `{"match_all": {}}`.
+ (Optional, object) A query clause that retrieves a subset of data from the
+ source index. See <>.
`evaluation`::
- (Required, object) Defines the type of evaluation you want to perform. For example:
- `binary_soft_classification`. See <>.
-
+ (Required, object) Defines the type of evaluation you want to perform. See
+ <>.
++
+--
+Available evaluation types:
+* `binary_soft_classification`
+* `regression`
+--
+
+
////
[[ml-evaluate-dfanalytics-results]]
==== {api-response-body-title}
@@ -74,7 +75,9 @@ packages together commonly used metrics for various analyses.
[[ml-evaluate-dfanalytics-example]] ==== {api-examples-title} -[source,js] +===== Binary soft classification + +[source,console] -------------------------------------------------- POST _ml/data_frame/_evaluate { @@ -87,7 +90,6 @@ POST _ml/data_frame/_evaluate } } -------------------------------------------------- -// CONSOLE // TEST[skip:TBD] The API returns the following results: @@ -132,3 +134,40 @@ The API returns the following results: } } ---- + + +===== {regression-cap} + +[source,console] +-------------------------------------------------- +POST _ml/data_frame/_evaluate +{ + "index": "house_price_predictions", <1> + "query": { + "bool": { + "filter": [ + { "term": { "ml.is_training": false } } <2> + ] + } + }, + "evaluation": { + "regression": { + "actual_field": "price", <3> + "predicted_field": "ml.price_prediction", <4> + "metrics": { + "r_squared": {}, + "mean_squared_error": {} + } + } + } +} +-------------------------------------------------- +// TEST[skip:TBD] + +<1> The output destination index from a {dfanalytics} {reganalysis}. +<2> In this example, a test/train split (`training_percent`) was defined for the +{reganalysis}. This query limits evaluation to be performed on the test split +only. +<3> The ground truth value for the actual house price. This is required in order +to evaluate results. +<4> The predicted value for house price calculated by the {reganalysis}. diff --git a/docs/reference/ml/df-analytics/apis/evaluateresources.asciidoc b/docs/reference/ml/df-analytics/apis/evaluateresources.asciidoc index 186e54bb378..caf05f97c0b 100644 --- a/docs/reference/ml/df-analytics/apis/evaluateresources.asciidoc +++ b/docs/reference/ml/df-analytics/apis/evaluateresources.asciidoc @@ -12,7 +12,19 @@ Evaluation configuration objects relate to the <>. `evaluation`:: (object) Defines the type of evaluation you want to perform. The value of this object can be different depending on the type of evaluation you want to - perform. For example, it can contain <>. + perform. ++ +-- +Available evaluation types: +* `binary_soft_classification` +* `regression` +-- + +`query`:: + (object) A query clause that retrieves a subset of data from the source index. + See <>. The evaluation only applies to those documents of the index + that match the query. + [[binary-sc-resources]] ==== Binary soft classification configuration objects @@ -27,18 +39,18 @@ probability whether each row is an outlier. ===== {api-definitions-title} `actual_field`:: - (string) The field of the `index` which contains the `ground - truth`. The data type of this field can be boolean or integer. If the data - type is integer, the value has to be either `0` (false) or `1` (true). + (string) The field of the `index` which contains the `ground truth`. + The data type of this field can be boolean or integer. If the data type is + integer, the value has to be either `0` (false) or `1` (true). `predicted_probability_field`:: - (string) The field of the `index` that defines the probability of whether the - item belongs to the class in question or not. It's the field that contains the - results of the analysis. + (string) The field of the `index` that defines the probability of + whether the item belongs to the class in question or not. It's the field that + contains the results of the analysis. `metrics`:: - (object) Specifies the metrics that are used for the evaluation. Available - metrics: + (object) Specifies the metrics that are used for the evaluation. 
+ Available metrics: `auc_roc`:: (object) The AUC ROC (area under the curve of the receiver operating @@ -60,4 +72,27 @@ probability whether each row is an outlier. (`tp` - true positive, `fp` - false positive, `tn` - true negative, `fn` - false negative) are calculated. Default value is {"at": [0.25, 0.50, 0.75]}. - \ No newline at end of file + + +[[regression-evaluation-resources]] +==== {regression-cap} evaluation objects + +{regression-cap} evaluation evaluates the results of a {regression} analysis +which outputs a prediction of values. + + +[discrete] +[[regression-evaluation-resources-properties]] +===== {api-definitions-title} + +`actual_field`:: + (string) The field of the `index` which contains the `ground truth`. The data + type of this field must be numerical. + +`predicted_field`:: + (string) The field in the `index` that contains the predicted value, + in other words the results of the {regression} analysis. + +`metrics`:: + (object) Specifies the metrics that are used for the evaluation. Available + metrics are `r_squared` and `mean_squared_error`. \ No newline at end of file diff --git a/docs/reference/ml/df-analytics/apis/get-dfanalytics-stats.asciidoc b/docs/reference/ml/df-analytics/apis/get-dfanalytics-stats.asciidoc index 3afe309cbac..fd23d6be6ba 100644 --- a/docs/reference/ml/df-analytics/apis/get-dfanalytics-stats.asciidoc +++ b/docs/reference/ml/df-analytics/apis/get-dfanalytics-stats.asciidoc @@ -106,11 +106,10 @@ The API returns the following information: [[ml-get-dfanalytics-stats-example]] ==== {api-examples-title} -[source,js] +[source,console] -------------------------------------------------- GET _ml/data_frame/analytics/loganalytics/_stats -------------------------------------------------- -// CONSOLE // TEST[skip:TBD] diff --git a/docs/reference/ml/df-analytics/apis/get-dfanalytics.asciidoc b/docs/reference/ml/df-analytics/apis/get-dfanalytics.asciidoc index fe79b4f5245..88b4526efca 100644 --- a/docs/reference/ml/df-analytics/apis/get-dfanalytics.asciidoc +++ b/docs/reference/ml/df-analytics/apis/get-dfanalytics.asciidoc @@ -90,11 +90,10 @@ when there are no matches or only partial matches. The following example gets configuration information for the `loganalytics` {dfanalytics-job}: -[source,js] +[source,console] -------------------------------------------------- GET _ml/data_frame/analytics/loganalytics -------------------------------------------------- -// CONSOLE // TEST[skip:TBD] The API returns the following results: diff --git a/docs/reference/ml/df-analytics/apis/put-dfanalytics.asciidoc b/docs/reference/ml/df-analytics/apis/put-dfanalytics.asciidoc index 43231793578..f9884626ae5 100644 --- a/docs/reference/ml/df-analytics/apis/put-dfanalytics.asciidoc +++ b/docs/reference/ml/df-analytics/apis/put-dfanalytics.asciidoc @@ -121,10 +121,13 @@ and mappings. 
[[ml-put-dfanalytics-example]]
==== {api-examples-title}
+[[ml-put-dfanalytics-example-od]]
+===== {oldetection-cap} example
+
The following example creates the `loganalytics` {dfanalytics-job}, the analysis
type is `outlier_detection`:
-[source,js]
+[source,console]
--------------------------------------------------
PUT _ml/data_frame/analytics/loganalytics
{
@@ -141,7 +144,6 @@ PUT _ml/data_frame/analytics/loganalytics
}
}
--------------------------------------------------
-// CONSOLE
// TEST[setup:setup_logdata]
@@ -173,4 +175,64 @@ The API returns the following result:
}
----
// TESTRESPONSE[s/1562351429434/$body.$_path/]
-// TESTRESPONSE[s/"version" : "7.3.0"/"version" : $body.version/]
\ No newline at end of file
+// TESTRESPONSE[s/"version" : "7.3.0"/"version" : $body.version/]
+
+
+[[ml-put-dfanalytics-example-r]]
+===== {regression-cap} example
+
+The following example creates the `house_price_regression_analysis`
+{dfanalytics-job}, the analysis type is `regression`:
+
+[source,console]
+--------------------------------------------------
+PUT _ml/data_frame/analytics/house_price_regression_analysis
+{
+ "source": {
+ "index": "houses_sold_last_10_yrs"
+ },
+ "dest": {
+ "index": "house_price_predictions"
+ },
+ "analysis":
+ {
+ "regression": {
+ "dependent_variable": "price"
+ }
+ }
+}
+--------------------------------------------------
+// TEST[skip:TBD]
+
+
+The API returns the following result:
+
+[source,console-result]
+----
+{
+ "id" : "house_price_regression_analysis",
+ "source" : {
+ "index" : [
+ "houses_sold_last_10_yrs"
+ ],
+ "query" : {
+ "match_all" : { }
+ }
+ },
+ "dest" : {
+ "index" : "house_price_predictions",
+ "results_field" : "ml"
+ },
+ "analysis" : {
+ "regression" : {
+ "dependent_variable" : "price",
+ "training_percent" : 100
+ }
+ },
+ "model_memory_limit" : "1gb",
+ "create_time" : 1567168659127,
+ "version" : "8.0.0"
+}
+----
+// TESTRESPONSE[s/1567168659127/$body.$_path/]
+// TESTRESPONSE[s/"version" : "8.0.0"/"version" : $body.version/]
\ No newline at end of file
diff --git a/docs/reference/ml/df-analytics/apis/start-dfanalytics.asciidoc b/docs/reference/ml/df-analytics/apis/start-dfanalytics.asciidoc
index 8ca3dfaaafd..7dcd01f49f0 100644
--- a/docs/reference/ml/df-analytics/apis/start-dfanalytics.asciidoc
+++ b/docs/reference/ml/df-analytics/apis/start-dfanalytics.asciidoc
@@ -46,11 +46,10 @@ and {stack-ov}/built-in-roles.html[Built-in roles].
The following example starts the `loganalytics` {dfanalytics-job}: -[source,js] +[source,console] -------------------------------------------------- POST _ml/data_frame/analytics/loganalytics/_start -------------------------------------------------- -// CONSOLE // TEST[skip:setup:logdata_job] When the {dfanalytics-job} starts, you receive the following results: diff --git a/docs/reference/ml/df-analytics/apis/stop-dfanalytics.asciidoc b/docs/reference/ml/df-analytics/apis/stop-dfanalytics.asciidoc index 87700245e58..0fc0a35b98f 100644 --- a/docs/reference/ml/df-analytics/apis/stop-dfanalytics.asciidoc +++ b/docs/reference/ml/df-analytics/apis/stop-dfanalytics.asciidoc @@ -68,11 +68,10 @@ stop all {dfanalytics-job} by using _all or by specifying * as the The following example stops the `loganalytics` {dfanalytics-job}: -[source,js] +[source,console] -------------------------------------------------- POST _ml/data_frame/analytics/loganalytics/_stop -------------------------------------------------- -// CONSOLE // TEST[skip:TBD] When the {dfanalytics-job} stops, you receive the following results: diff --git a/docs/reference/modules/cluster/allocation_filtering.asciidoc b/docs/reference/modules/cluster/allocation_filtering.asciidoc index bf01b5fab83..5a39743cc85 100644 --- a/docs/reference/modules/cluster/allocation_filtering.asciidoc +++ b/docs/reference/modules/cluster/allocation_filtering.asciidoc @@ -18,7 +18,7 @@ The most common use case for cluster-level shard allocation filtering is when you want to decommission a node. To move shards off of a node prior to shutting it down, you could create a filter that excludes the node by its IP address: -[source,js] +[source,console] -------------------------------------------------- PUT _cluster/settings { @@ -27,7 +27,6 @@ PUT _cluster/settings } } -------------------------------------------------- -// CONSOLE [float] [[cluster-routing-settings]] @@ -57,7 +56,7 @@ The cluster allocation settings support the following built-in attributes: You can use wildcards when specifying attribute values, for example: -[source,js] +[source,console] ------------------------ PUT _cluster/settings { @@ -66,4 +65,3 @@ PUT _cluster/settings } } ------------------------ -// CONSOLE diff --git a/docs/reference/modules/cluster/disk_allocator.asciidoc b/docs/reference/modules/cluster/disk_allocator.asciidoc index 4ff1e5cca21..aa5ac15c455 100644 --- a/docs/reference/modules/cluster/disk_allocator.asciidoc +++ b/docs/reference/modules/cluster/disk_allocator.asciidoc @@ -52,14 +52,13 @@ threshold). 
An example of resetting the read-only index block on the `twitter` index: -[source,js] +[source,console] -------------------------------------------------- PUT /twitter/_settings { "index.blocks.read_only_allow_delete": null } -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] -- @@ -88,7 +87,7 @@ An example of updating the low watermark to at least 100 gigabytes free, a high watermark of at least 50 gigabytes free, and a flood stage watermark of 10 gigabytes free, and updating the information about the cluster every minute: -[source,js] +[source,console] -------------------------------------------------- PUT _cluster/settings { @@ -100,4 +99,3 @@ PUT _cluster/settings } } -------------------------------------------------- -// CONSOLE diff --git a/docs/reference/modules/cluster/misc.asciidoc b/docs/reference/modules/cluster/misc.asciidoc index 08568088baa..32803bf12bc 100644 --- a/docs/reference/modules/cluster/misc.asciidoc +++ b/docs/reference/modules/cluster/misc.asciidoc @@ -75,7 +75,7 @@ any key prefixed with `cluster.metadata.`. For example, to store the email address of the administrator of a cluster under the key `cluster.metadata.administrator`, issue this request: -[source,js] +[source,console] ------------------------------- PUT /_cluster/settings { @@ -84,7 +84,6 @@ PUT /_cluster/settings } } ------------------------------- -// CONSOLE IMPORTANT: User-defined cluster metadata is not intended to store sensitive or confidential information. Any information stored in user-defined cluster @@ -116,7 +115,7 @@ The settings which control logging can be updated dynamically with the `logger.` prefix. For instance, to increase the logging level of the `indices.recovery` module to `DEBUG`, issue this request: -[source,js] +[source,console] ------------------------------- PUT /_cluster/settings { @@ -125,7 +124,6 @@ PUT /_cluster/settings } } ------------------------------- -// CONSOLE [[persistent-tasks-allocation]] diff --git a/docs/reference/modules/cross-cluster-search.asciidoc b/docs/reference/modules/cross-cluster-search.asciidoc index f2f7fdfc666..0027ee1af93 100644 --- a/docs/reference/modules/cross-cluster-search.asciidoc +++ b/docs/reference/modules/cross-cluster-search.asciidoc @@ -21,7 +21,7 @@ To perform a {ccs}, you must have at least one remote cluster configured. The following <> API request adds three remote clusters:`cluster_one`, `cluster_two`, and `cluster_three`. -[source,js] +[source,console] -------------------------------- PUT _cluster/settings { @@ -48,7 +48,6 @@ PUT _cluster/settings } } -------------------------------- -// CONSOLE // TEST[setup:host] // TEST[s/127.0.0.1:930\d+/\${transport_host}/] @@ -59,7 +58,7 @@ PUT _cluster/settings The following <> API request searches the `twitter` index on a single remote cluster, `cluster_one`. 
-[source,js] +[source,console] -------------------------------------------------- GET /cluster_one:twitter/_search { @@ -70,7 +69,6 @@ GET /cluster_one:twitter/_search } } -------------------------------------------------- -// CONSOLE // TEST[continued] // TEST[setup:twitter] @@ -132,7 +130,7 @@ three clusters: * Your local cluster * Two remote clusters, `cluster_one` and `cluster_two` -[source,js] +[source,console] -------------------------------------------------- GET /twitter,cluster_one:twitter,cluster_two:twitter/_search { @@ -143,7 +141,6 @@ GET /twitter,cluster_one:twitter,cluster_two:twitter/_search } } -------------------------------------------------- -// CONSOLE // TEST[continued] The API returns the following response: @@ -235,7 +232,7 @@ To skip an unavailable cluster during a {ccs}, set the The following <> API request changes `cluster_two`'s `skip_unavailable` setting to `true`. -[source,js] +[source,console] -------------------------------- PUT _cluster/settings { @@ -244,7 +241,6 @@ PUT _cluster/settings } } -------------------------------- -// CONSOLE // TEST[continued] If `cluster_two` is disconnected or unavailable during a {ccs}, {es} won't diff --git a/docs/reference/modules/discovery/adding-removing-nodes.asciidoc b/docs/reference/modules/discovery/adding-removing-nodes.asciidoc index 2a62bb5e49d..9e316294497 100644 --- a/docs/reference/modules/discovery/adding-removing-nodes.asciidoc +++ b/docs/reference/modules/discovery/adding-removing-nodes.asciidoc @@ -60,7 +60,7 @@ without affecting the cluster's master-level availability. A node can be added to the voting configuration exclusion list using the <> API. For example: -[source,js] +[source,console] -------------------------------------------------- # Add node to voting configuration exclusions list and wait for the system # to auto-reconfigure the node out of the voting configuration up to the @@ -71,7 +71,6 @@ POST /_cluster/voting_config_exclusions/node_name # auto-reconfiguration up to one minute POST /_cluster/voting_config_exclusions/node_name?timeout=1m -------------------------------------------------- -// CONSOLE // TEST[skip:this would break the test cluster if executed] The node that should be added to the exclusions list is specified using @@ -104,11 +103,10 @@ reconfigure the voting configuration to remove that node and prevents it from returning to the voting configuration once it has removed. The current list of exclusions is stored in the cluster state and can be inspected as follows: -[source,js] +[source,console] -------------------------------------------------- GET /_cluster/state?filter_path=metadata.cluster_coordination.voting_config_exclusions -------------------------------------------------- -// CONSOLE This list is limited in size by the `cluster.max_voting_config_exclusions` setting, which defaults to `10`. See <>. Since @@ -123,7 +121,7 @@ down permanently, its exclusion can be removed after it is shut down and removed from the cluster. Exclusions can also be cleared if they were created in error or were only required temporarily: -[source,js] +[source,console] -------------------------------------------------- # Wait for all the nodes with voting configuration exclusions to be removed from # the cluster and then remove all the exclusions, allowing any node to return to @@ -134,4 +132,3 @@ DELETE /_cluster/voting_config_exclusions # to return to the voting configuration in the future. 
DELETE /_cluster/voting_config_exclusions?wait_for_removal=false -------------------------------------------------- -// CONSOLE diff --git a/docs/reference/modules/discovery/discovery.asciidoc b/docs/reference/modules/discovery/discovery.asciidoc index 2fa4e147014..e7d34c481d1 100644 --- a/docs/reference/modules/discovery/discovery.asciidoc +++ b/docs/reference/modules/discovery/discovery.asciidoc @@ -11,10 +11,10 @@ This process starts with a list of _seed_ addresses from one or more of any master-eligible nodes that were in the last-known cluster. The process operates in two phases: First, each node probes the seed addresses by connecting to each address and attempting to identify the node to which it is -connected. Secondly it shares with the remote node a list of all of its known -master-eligible peers and the remote node responds with _its_ peers in turn. -The node then probes all the new nodes that it just discovered, requests their -peers, and so on. +connected and to verify that it is master-eligible. Secondly, if successful, it +shares with the remote node a list of all of its known master-eligible peers +and the remote node responds with _its_ peers in turn. The node then probes all +the new nodes that it just discovered, requests their peers, and so on. If the node is not master-eligible then it continues this discovery process until it has discovered an elected master node. If no elected master is diff --git a/docs/reference/modules/discovery/voting.asciidoc b/docs/reference/modules/discovery/voting.asciidoc index 7c6ea0c1cc9..888620e331d 100644 --- a/docs/reference/modules/discovery/voting.asciidoc +++ b/docs/reference/modules/discovery/voting.asciidoc @@ -27,11 +27,10 @@ see <>. The current voting configuration is stored in the cluster state so you can inspect its current contents as follows: -[source,js] +[source,console] -------------------------------------------------- GET /_cluster/state?filter_path=metadata.cluster_coordination.last_committed_config -------------------------------------------------- -// CONSOLE NOTE: The current voting configuration is not necessarily the same as the set of all available master-eligible nodes in the cluster. Altering the voting diff --git a/docs/reference/modules/indices/request_cache.asciidoc b/docs/reference/modules/indices/request_cache.asciidoc index fc04c5e9c63..11c8180eb47 100644 --- a/docs/reference/modules/indices/request_cache.asciidoc +++ b/docs/reference/modules/indices/request_cache.asciidoc @@ -40,11 +40,10 @@ evicted. The cache can be expired manually with the <>: -[source,js] +[source,console] ------------------------ POST /kimchy,elasticsearch/_cache/clear?request=true ------------------------ -// CONSOLE // TEST[s/^/PUT kimchy\nPUT elasticsearch\n/] [float] @@ -53,7 +52,7 @@ POST /kimchy,elasticsearch/_cache/clear?request=true The cache is enabled by default, but can be disabled when creating a new index as follows: -[source,js] +[source,console] ----------------------------- PUT /my_index { @@ -62,17 +61,15 @@ PUT /my_index } } ----------------------------- -// CONSOLE It can also be enabled or disabled dynamically on an existing index with the <> API: -[source,js] +[source,console] ----------------------------- PUT /my_index/_settings { "index.requests.cache.enable": true } ----------------------------- -// CONSOLE // TEST[continued] @@ -82,7 +79,7 @@ PUT /my_index/_settings The `request_cache` query-string parameter can be used to enable or disable caching on a *per-request* basis. 
If set, it overrides the index-level setting: -[source,js] +[source,console] ----------------------------- GET /my_index/_search?request_cache=true { @@ -96,7 +93,6 @@ GET /my_index/_search?request_cache=true } } ----------------------------- -// CONSOLE // TEST[continued] IMPORTANT: If your query uses a script whose result is not deterministic (e.g. @@ -140,16 +136,14 @@ setting is provided for completeness' sake only. The size of the cache (in bytes) and the number of evictions can be viewed by index, with the <> API: -[source,js] +[source,console] ------------------------ GET /_stats/request_cache?human ------------------------ -// CONSOLE or by node with the <> API: -[source,js] +[source,console] ------------------------ GET /_nodes/stats/indices/request_cache?human ------------------------ -// CONSOLE diff --git a/docs/reference/modules/remote-clusters.asciidoc b/docs/reference/modules/remote-clusters.asciidoc index c6bfb16c797..fc60623c739 100644 --- a/docs/reference/modules/remote-clusters.asciidoc +++ b/docs/reference/modules/remote-clusters.asciidoc @@ -99,7 +99,7 @@ For more information about the optional transport settings, see If you use <>, the remote clusters are available on every node in the cluster. For example: -[source,js] +[source,console] -------------------------------- PUT _cluster/settings { @@ -129,14 +129,13 @@ PUT _cluster/settings } } -------------------------------- -// CONSOLE // TEST[setup:host] // TEST[s/127.0.0.1:9300/\${transport_host}/] You can dynamically update the compression and ping schedule settings. However, you must re-include seeds in the settings update request. For example: -[source,js] +[source,console] -------------------------------- PUT _cluster/settings { @@ -160,7 +159,6 @@ PUT _cluster/settings } } -------------------------------- -// CONSOLE // TEST[continued] NOTE: When the compression or ping schedule settings change, all the existing @@ -169,7 +167,7 @@ fail. A remote cluster can be deleted from the cluster settings by setting its seeds and optional settings to `null` : -[source,js] +[source,console] -------------------------------- PUT _cluster/settings { @@ -188,8 +186,8 @@ PUT _cluster/settings } } -------------------------------- -// CONSOLE // TEST[continued] + <1> `cluster_two` would be removed from the cluster settings, leaving `cluster_one` and `cluster_three` intact. @@ -247,14 +245,6 @@ PUT _cluster/settings Elasticsearch compresses the response. If unset, the global `transport.compress` is used as the fallback setting. -`cluster.remote.${cluster_alias}.proxy`:: - - Sets a proxy address for the specified remote cluster. By default this is not - set, meaning that Elasticsearch will connect directly to the nodes in the - remote cluster using their <>. - If this setting is set to an IP address or hostname then Elasticsearch will - connect to the nodes in the remote cluster using this address instead. - [float] [[retrieve-remote-clusters-info]] === Retrieving remote clusters info diff --git a/docs/reference/modules/snapshots.asciidoc b/docs/reference/modules/snapshots.asciidoc index c68ca60ec3a..dbecf09e7dd 100644 --- a/docs/reference/modules/snapshots.asciidoc +++ b/docs/reference/modules/snapshots.asciidoc @@ -46,12 +46,14 @@ A snapshot contains a copy of the on-disk data structures that make up an index. This means that snapshots can only be restored to versions of Elasticsearch that can read the indices: +* A snapshot of an index created in 6.x can be restored to 7.x. 
* A snapshot of an index created in 5.x can be restored to 6.x. * A snapshot of an index created in 2.x can be restored to 5.x. * A snapshot of an index created in 1.x can be restored to 2.x. Conversely, snapshots of indices created in 1.x **cannot** be restored to 5.x -or 6.x, and snapshots of indices created in 2.x **cannot** be restored to 6.x. +or 6.x, snapshots of indices created in 2.x **cannot** be restored to 6.x +or 7.x, and snapshots of indices created in 5.x **cannot** be restored to 7.x. Each snapshot can contain indices created in various versions of Elasticsearch, and when restoring a snapshot it must be possible to restore all of the indices @@ -91,7 +93,7 @@ be corrupted. While setting the repository to `readonly` on all but one of the clusters should work with multiple clusters differing by one major version, it is not a supported configuration. -[source,js] +[source,console] ----------------------------------- PUT /_snapshot/my_backup { @@ -101,16 +103,14 @@ PUT /_snapshot/my_backup } } ----------------------------------- -// CONSOLE // TESTSETUP To retrieve information about a registered repository, use a GET request: -[source,js] +[source,console] ----------------------------------- GET /_snapshot/my_backup ----------------------------------- -// CONSOLE which returns: @@ -132,28 +132,25 @@ specifying repository names. For example, the following request retrieves information about all of the snapshot repositories that start with `repo` or contain `backup`: -[source,js] +[source,console] ----------------------------------- GET /_snapshot/repo*,*backup* ----------------------------------- -// CONSOLE To retrieve information about all registered snapshot repositories, omit the repository name or specify `_all`: -[source,js] +[source,console] ----------------------------------- GET /_snapshot ----------------------------------- -// CONSOLE or -[source,js] +[source,console] ----------------------------------- GET /_snapshot/_all ----------------------------------- -// CONSOLE [float] ===== Shared File System Repository @@ -182,7 +179,7 @@ path.repo: ["\\\\MY_SERVER\\Snapshots"] After all nodes are restarted, the following command can be used to register the shared file system repository with the name `my_fs_backup`: -[source,js] +[source,console] ----------------------------------- PUT /_snapshot/my_fs_backup { @@ -193,13 +190,12 @@ PUT /_snapshot/my_fs_backup } } ----------------------------------- -// CONSOLE // TEST[skip:no access to absolute path] If the repository location is specified as a relative path this path will be resolved against the first path specified in `path.repo`: -[source,js] +[source,console] ----------------------------------- PUT /_snapshot/my_fs_backup { @@ -210,7 +206,6 @@ PUT /_snapshot/my_fs_backup } } ----------------------------------- -// CONSOLE // TEST[continued] The following settings are supported: @@ -277,7 +272,7 @@ When you restore a source only snapshot: When you create a source repository, you must specify the type and name of the delegate repository where the snapshots will be stored: -[source,js] +[source,console] ----------------------------------- PUT _snapshot/my_src_only_repository { @@ -288,7 +283,6 @@ PUT _snapshot/my_src_only_repository } } ----------------------------------- -// CONSOLE // TEST[continued] [float] @@ -307,7 +301,7 @@ When a repository is registered, it's immediately verified on all master and dat on all nodes currently present in the cluster. 
The `verify` parameter can be used to explicitly disable the repository verification when registering or updating a repository: -[source,js] +[source,console] ----------------------------------- PUT /_snapshot/my_unverified_backup?verify=false { @@ -317,16 +311,14 @@ PUT /_snapshot/my_unverified_backup?verify=false } } ----------------------------------- -// CONSOLE // TEST[continued] The verification process can also be executed manually by running the following command: -[source,js] +[source,console] ----------------------------------- POST /_snapshot/my_unverified_backup/_verify ----------------------------------- -// CONSOLE // TEST[continued] It returns a list of nodes where repository was successfully verified or an error message if verification process failed. @@ -339,11 +331,10 @@ process. This unreferenced data does in no way negatively impact the performance than necessary storage use. In order to clean up this unreferenced data, users can call the cleanup endpoint for a repository which will trigger a complete accounting of the repositories contents and subsequent deletion of all unreferenced data that was found. -[source,js] +[source,console] ----------------------------------- POST /_snapshot/my_repository/_cleanup ----------------------------------- -// CONSOLE // TEST[continued] The response to a cleanup request looks as follows: @@ -374,11 +365,10 @@ A repository can contain multiple snapshots of the same cluster. Snapshots are i cluster. A snapshot with the name `snapshot_1` in the repository `my_backup` can be created by executing the following command: -[source,js] +[source,console] ----------------------------------- PUT /_snapshot/my_backup/snapshot_1?wait_for_completion=true ----------------------------------- -// CONSOLE // TEST[continued] The `wait_for_completion` parameter specifies whether or not the request should return immediately after snapshot @@ -389,7 +379,7 @@ even minutes) for this command to return even if the `wait_for_completion` param By default a snapshot of all open and started indices in the cluster is created. This behavior can be changed by specifying the list of indices in the body of the snapshot request. -[source,js] +[source,console] ----------------------------------- PUT /_snapshot/my_backup/snapshot_2?wait_for_completion=true { @@ -402,7 +392,6 @@ PUT /_snapshot/my_backup/snapshot_2?wait_for_completion=true } } ----------------------------------- -// CONSOLE // TEST[continued] The list of indices that should be included into the snapshot can be specified using the `indices` parameter that @@ -421,12 +410,12 @@ new indices. Note that special characters need to be URI encoded. For example, creating a snapshot with the current day in the name, like `snapshot-2018.05.11`, can be achieved with the following command: -[source,js] + +[source,console] ----------------------------------- # PUT /_snapshot/my_backup/ PUT /_snapshot/my_backup/%3Csnapshot-%7Bnow%2Fd%7D%3E ----------------------------------- -// CONSOLE // TEST[continued] @@ -452,11 +441,10 @@ filtering settings and rebalancing algorithm) once the snapshot is finished. 
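Scripting snapshot creation mostly comes down to percent-encoding the date math name, as in the `%3Csnapshot-%7Bnow%2Fd%7D%3E` example above. Here is a minimal Python sketch using the `requests` package; the cluster address is an assumption, and `my_backup` is the repository registered in the preceding examples.

[source,python]
--------------------------------------------------
import requests
from urllib.parse import quote

ES = "http://localhost:9200"  # assumed cluster address
REPO = "my_backup"            # repository registered in the examples above

# Percent-encode the date-math name, e.g. <snapshot-{now/d}> -> %3Csnapshot-%7Bnow%2Fd%7D%3E
name = quote("<snapshot-{now/d}>", safe="")

resp = requests.put(
    f"{ES}/_snapshot/{REPO}/{name}",
    params={"wait_for_completion": "true"},
)
resp.raise_for_status()
print(resp.json()["snapshot"]["state"])  # e.g. SUCCESS once the snapshot completes
--------------------------------------------------

With `wait_for_completion=true` the call blocks until the snapshot finishes (or fails to initialize), so the returned `state` can be checked directly.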
Once a snapshot is created information about this snapshot can be obtained using the following command: -[source,sh] +[source,console] ----------------------------------- GET /_snapshot/my_backup/snapshot_1 ----------------------------------- -// CONSOLE // TEST[continued] This command returns basic information about the snapshot including start and end time, version of @@ -490,20 +478,18 @@ snapshot and the list of failures that occurred during the snapshot. The snapsho Similar as for repositories, information about multiple snapshots can be queried in one go, supporting wildcards as well: -[source,sh] +[source,console] ----------------------------------- GET /_snapshot/my_backup/snapshot_*,some_other_snapshot ----------------------------------- -// CONSOLE // TEST[continued] All snapshots currently stored in the repository can be listed using the following command: -[source,sh] +[source,console] ----------------------------------- GET /_snapshot/my_backup/_all ----------------------------------- -// CONSOLE // TEST[continued] The command fails if some of the snapshots are unavailable. The boolean parameter `ignore_unavailable` can be used to @@ -520,20 +506,18 @@ value of the `verbose` parameter is `true`. A currently running snapshot can be retrieved using the following command: -[source,sh] +[source,console] ----------------------------------- GET /_snapshot/my_backup/_current ----------------------------------- -// CONSOLE // TEST[continued] A snapshot can be deleted from the repository using the following command: -[source,sh] +[source,console] ----------------------------------- DELETE /_snapshot/my_backup/snapshot_2 ----------------------------------- -// CONSOLE // TEST[continued] When a snapshot is deleted from a repository, Elasticsearch deletes all files that are associated with the deleted @@ -544,11 +528,10 @@ started by mistake. A repository can be unregistered using the following command: -[source,sh] +[source,console] ----------------------------------- DELETE /_snapshot/my_backup ----------------------------------- -// CONSOLE // TEST[continued] When a repository is unregistered, Elasticsearch only removes the reference to the location where the repository is storing @@ -560,11 +543,10 @@ the snapshots. The snapshots themselves are left untouched and in place. A snapshot can be restored using the following command: -[source,sh] +[source,console] ----------------------------------- POST /_snapshot/my_backup/snapshot_1/_restore ----------------------------------- -// CONSOLE // TEST[continued] By default, all indices in the snapshot are restored, and the cluster state is @@ -579,7 +561,7 @@ http://docs.oracle.com/javase/6/docs/api/java/util/regex/Matcher.html#appendRepl Set `include_aliases` to `false` to prevent aliases from being restored together with associated indices -[source,js] +[source,console] ----------------------------------- POST /_snapshot/my_backup/snapshot_1/_restore { @@ -590,7 +572,6 @@ POST /_snapshot/my_backup/snapshot_1/_restore "rename_replacement": "restored_index_$1" } ----------------------------------- -// CONSOLE // TEST[continued] The restore operation can be performed on a functioning cluster. However, an @@ -618,7 +599,7 @@ restored in this case and all missing shards will be recreated empty. Most of index settings can be overridden during the restore process. 
For example, the following command will restore the index `index_1` without creating any replicas while switching back to default refresh interval: -[source,js] +[source,console] ----------------------------------- POST /_snapshot/my_backup/snapshot_1/_restore { @@ -631,7 +612,6 @@ POST /_snapshot/my_backup/snapshot_1/_restore ] } ----------------------------------- -// CONSOLE // TEST[continued] Please note, that some settings such as `index.number_of_shards` cannot be changed during restore operation. @@ -663,31 +643,28 @@ the global cluster state. A list of currently running snapshots with their detailed status information can be obtained using the following command: -[source,sh] +[source,console] ----------------------------------- GET /_snapshot/_status ----------------------------------- -// CONSOLE // TEST[continued] In this format, the command will return information about all currently running snapshots. By specifying a repository name, it's possible to limit the results to a particular repository: -[source,sh] +[source,console] ----------------------------------- GET /_snapshot/my_backup/_status ----------------------------------- -// CONSOLE // TEST[continued] If both repository name and snapshot id are specified, this command will return detailed status information for the given snapshot even if it's not currently running: -[source,sh] +[source,console] ----------------------------------- GET /_snapshot/my_backup/snapshot_1/_status ----------------------------------- -// CONSOLE // TEST[continued] The output looks similar to the following: @@ -739,11 +716,10 @@ in progress, there's also a `processed` section that contains information about Multiple ids are also supported: -[source,sh] +[source,console] ----------------------------------- GET /_snapshot/my_backup/snapshot_1,snapshot_2/_status ----------------------------------- -// CONSOLE // TEST[continued] [float] @@ -756,11 +732,10 @@ the simplest method that can be used to get notified about operation completion. The snapshot operation can be also monitored by periodic calls to the snapshot info: -[source,sh] +[source,console] ----------------------------------- GET /_snapshot/my_backup/snapshot_1 ----------------------------------- -// CONSOLE // TEST[continued] Please note that snapshot info operation uses the same resources and thread pool as the snapshot operation. So, @@ -769,11 +744,10 @@ for available resources before returning the result. On very large shards the wa To get more immediate and complete information about snapshots the snapshot status command can be used instead: -[source,sh] +[source,console] ----------------------------------- GET /_snapshot/my_backup/snapshot_1/_status ----------------------------------- -// CONSOLE // TEST[continued] While snapshot info method returns only basic information about the snapshot in progress, the snapshot status returns @@ -799,11 +773,10 @@ running snapshot was executed by mistake, or takes unusually long, it can be ter The snapshot delete operation checks if the deleted snapshot is currently running and if it does, the delete operation stops that snapshot before deleting the snapshot data from the repository. -[source,sh] +[source,console] ----------------------------------- DELETE /_snapshot/my_backup/snapshot_1 ----------------------------------- -// CONSOLE // TEST[continued] The restore operation uses the standard shard recovery mechanism. 
Therefore, any currently running restore operation can diff --git a/docs/reference/modules/transport.asciidoc b/docs/reference/modules/transport.asciidoc index f37d886da78..d8360fa7621 100644 --- a/docs/reference/modules/transport.asciidoc +++ b/docs/reference/modules/transport.asciidoc @@ -154,7 +154,7 @@ request was uncompressed--even when compression is enabled. The transport module has a dedicated tracer logger which, when activated, logs incoming and out going requests. The log can be dynamically activated by settings the level of the `org.elasticsearch.transport.TransportService.tracer` logger to `TRACE`: -[source,js] +[source,console] -------------------------------------------------- PUT _cluster/settings { @@ -163,12 +163,11 @@ PUT _cluster/settings } } -------------------------------------------------- -// CONSOLE You can also control which actions will be traced, using a set of include and exclude wildcard patterns. By default every request will be traced except for fault detection pings: -[source,js] +[source,console] -------------------------------------------------- PUT _cluster/settings { @@ -178,6 +177,3 @@ PUT _cluster/settings } } -------------------------------------------------- -// CONSOLE - - diff --git a/docs/reference/monitoring/collecting-monitoring-data.asciidoc b/docs/reference/monitoring/collecting-monitoring-data.asciidoc index 8ab0443edb1..3d5a85b3012 100644 --- a/docs/reference/monitoring/collecting-monitoring-data.asciidoc +++ b/docs/reference/monitoring/collecting-monitoring-data.asciidoc @@ -53,7 +53,7 @@ view the cluster settings and `manage` cluster privileges to change them. For example, use the following APIs to review and change this setting: -[source,js] +[source,console] ---------------------------------- GET _cluster/settings @@ -64,7 +64,6 @@ PUT _cluster/settings } } ---------------------------------- -// CONSOLE Alternatively, you can enable this setting in {kib}. In the side navigation, click *Monitoring*. If data collection is disabled, you are prompted to turn it diff --git a/docs/reference/monitoring/configuring-metricbeat.asciidoc b/docs/reference/monitoring/configuring-metricbeat.asciidoc index c768a778205..34f027b1553 100644 --- a/docs/reference/monitoring/configuring-metricbeat.asciidoc +++ b/docs/reference/monitoring/configuring-metricbeat.asciidoc @@ -28,7 +28,7 @@ production cluster. By default, it is is disabled (`false`). You can use the following APIs to review and change this setting: -[source,js] +[source,console] ---------------------------------- GET _cluster/settings @@ -38,8 +38,7 @@ PUT _cluster/settings "xpack.monitoring.collection.enabled": true } } ----------------------------------- -// CONSOLE +---------------------------------- If {es} {security-features} are enabled, you must have `monitor` cluster privileges to view the cluster settings and `manage` cluster privileges to change them. @@ -194,7 +193,7 @@ production cluster. 
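Since `xpack.monitoring.collection.enabled` is a dynamic cluster setting, it can be reviewed and changed from a script in the same way as the Console examples above. A minimal Python sketch with the `requests` package follows; the cluster address and the use of the `persistent` scope are assumptions, and authentication must be added when {security-features} are enabled.

[source,python]
--------------------------------------------------
import requests

ES = "http://localhost:9200"  # assumed address; add authentication if security is enabled

# Review the current cluster settings ...
print(requests.get(f"{ES}/_cluster/settings").json())

# ... then enable monitoring collection.
resp = requests.put(
    f"{ES}/_cluster/settings",
    json={"persistent": {"xpack.monitoring.collection.enabled": True}},
)
resp.raise_for_status()
--------------------------------------------------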
You can use the following API to change this setting: -[source,js] +[source,console] ---------------------------------- PUT _cluster/settings { @@ -203,7 +202,6 @@ PUT _cluster/settings } } ---------------------------------- -// CONSOLE If {es} {security-features} are enabled, you must have `monitor` cluster privileges to view the cluster settings and `manage` cluster privileges diff --git a/docs/reference/monitoring/indices.asciidoc b/docs/reference/monitoring/indices.asciidoc index 34cbced1c43..6586a945b5d 100644 --- a/docs/reference/monitoring/indices.asciidoc +++ b/docs/reference/monitoring/indices.asciidoc @@ -8,11 +8,10 @@ that store the monitoring data collected from a cluster. You can retrieve the templates through the `_template` API: -[source,sh] +[source,console] ---------------------------------- GET /_template/.monitoring-* ---------------------------------- -// CONSOLE By default, the template configures one shard and one replica for the monitoring indices. To override the default settings, add your own template: @@ -26,7 +25,7 @@ section. For example, the following template increases the number of shards to five and the number of replicas to two. -[source,js] +[source,console] ---------------------------------- PUT /_template/custom_monitoring { @@ -38,7 +37,6 @@ PUT /_template/custom_monitoring } } ---------------------------------- -// CONSOLE IMPORTANT: Only set the `number_of_shards` and `number_of_replicas` in the settings section. Overriding other monitoring template settings could cause diff --git a/docs/reference/query-dsl/bool-query.asciidoc b/docs/reference/query-dsl/bool-query.asciidoc index 1d6edfdbd0c..2d84ff05741 100644 --- a/docs/reference/query-dsl/bool-query.asciidoc +++ b/docs/reference/query-dsl/bool-query.asciidoc @@ -32,7 +32,7 @@ The `bool` query takes a _more-matches-is-better_ approach, so the score from each matching `must` or `should` clause will be added together to provide the final `_score` for each document. -[source,js] +[source,console] -------------------------------------------------- POST _search { @@ -59,7 +59,6 @@ POST _search } } -------------------------------------------------- -// CONSOLE [[score-bool-filter]] ==== Scoring with `bool.filter` @@ -72,7 +71,7 @@ all documents where the `status` field contains the term `active`. This first query assigns a score of `0` to all documents, as no scoring query has been specified: -[source,js] +[source,console] --------------------------------- GET _search { @@ -87,12 +86,11 @@ GET _search } } --------------------------------- -// CONSOLE This `bool` query has a `match_all` query, which assigns a score of `1.0` to all documents. -[source,js] +[source,console] --------------------------------- GET _search { @@ -110,13 +108,12 @@ GET _search } } --------------------------------- -// CONSOLE This `constant_score` query behaves in exactly the same way as the second example above. The `constant_score` query assigns a score of `1.0` to all documents matched by the filter. -[source,js] +[source,console] --------------------------------- GET _search { @@ -131,7 +128,6 @@ GET _search } } --------------------------------- -// CONSOLE ==== Using named queries to see which clauses matched diff --git a/docs/reference/query-dsl/boosting-query.asciidoc b/docs/reference/query-dsl/boosting-query.asciidoc index 4f2f60e5e22..d6427d5d9a2 100644 --- a/docs/reference/query-dsl/boosting-query.asciidoc +++ b/docs/reference/query-dsl/boosting-query.asciidoc @@ -14,7 +14,7 @@ excluding them from the search results. 
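The `bool` query structure documented above maps directly onto a request body that can be assembled programmatically. The following Python sketch uses the `requests` package; the cluster address and the field names (`user`, `tag`, `age`) are placeholders rather than values taken from the abbreviated examples above.

[source,python]
--------------------------------------------------
import requests

ES = "http://localhost:9200"  # assumed cluster address

# A bool query combining must / filter / must_not / should clauses.
query = {
    "query": {
        "bool": {
            "must": {"term": {"user": "kimchy"}},
            "filter": {"term": {"tag": "tech"}},
            "must_not": {"range": {"age": {"gte": 10, "lte": 20}}},
            "should": [
                {"term": {"tag": "wow"}},
                {"term": {"tag": "elasticsearch"}},
            ],
            "minimum_should_match": 1,
            "boost": 1.0,
        }
    }
}

resp = requests.post(f"{ES}/_search", json=query)
resp.raise_for_status()
for hit in resp.json()["hits"]["hits"]:
    print(hit["_id"], hit["_score"])
--------------------------------------------------

Clauses in `must` and `should` contribute to the final `_score`, while `filter` and `must_not` only restrict the result set, as described above.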
[[boosting-query-ex-request]] ==== Example request -[source,js] +[source,console] ---- GET /_search { @@ -35,7 +35,6 @@ GET /_search } } ---- -// CONSOLE [[boosting-top-level-params]] ==== Top-level parameters for `boosting` diff --git a/docs/reference/query-dsl/common-terms-query.asciidoc b/docs/reference/query-dsl/common-terms-query.asciidoc index f2d784eb0c4..ff38af303b5 100644 --- a/docs/reference/query-dsl/common-terms-query.asciidoc +++ b/docs/reference/query-dsl/common-terms-query.asciidoc @@ -70,7 +70,7 @@ as stopwords without the need to maintain a manual list. In this example, words that have a document frequency greater than 0.1% (eg `"this"` and `"is"`) will be treated as _common terms_. -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -84,7 +84,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE // TEST[warning:Deprecated field [common] used, replaced by [[match] query which can efficiently skip blocks of documents if the total number of hits is not tracked]] The number of terms which should match can be controlled with the @@ -95,7 +94,7 @@ The number of terms which should match can be controlled with the For low frequency terms, set the `low_freq_operator` to `"and"` to make all terms required: -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -110,12 +109,11 @@ GET /_search } } -------------------------------------------------- -// CONSOLE // TEST[warning:Deprecated field [common] used, replaced by [[match] query which can efficiently skip blocks of documents if the total number of hits is not tracked]] which is roughly equivalent to: -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -135,14 +133,13 @@ GET /_search } } -------------------------------------------------- -// CONSOLE Alternatively use <> to specify a minimum number or percentage of low frequency terms which must be present, for instance: -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -157,12 +154,11 @@ GET /_search } } -------------------------------------------------- -// CONSOLE // TEST[warning:Deprecated field [common] used, replaced by [[match] query which can efficiently skip blocks of documents if the total number of hits is not tracked]] which is roughly equivalent to: -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -187,7 +183,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE A different <> @@ -195,7 +190,7 @@ can be applied for low and high frequency terms with the additional `low_freq` and `high_freq` parameters. 
Here is an example when providing additional parameters (note the change in structure): -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -213,12 +208,11 @@ GET /_search } } -------------------------------------------------- -// CONSOLE // TEST[warning:Deprecated field [common] used, replaced by [[match] query which can efficiently skip blocks of documents if the total number of hits is not tracked]] which is roughly equivalent to: -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -249,7 +243,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE In this case it means the high frequency terms have only an impact on relevance when there are at least three of them. But the most @@ -257,7 +250,7 @@ interesting use of the <> for high frequency terms is when there are only high frequency terms: -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -275,12 +268,11 @@ GET /_search } } -------------------------------------------------- -// CONSOLE // TEST[warning:Deprecated field [common] used, replaced by [[match] query which can efficiently skip blocks of documents if the total number of hits is not tracked]] which is roughly equivalent to: -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -297,7 +289,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE The high frequency generated query is then slightly less restrictive than with an `AND`. diff --git a/docs/reference/query-dsl/constant-score-query.asciidoc b/docs/reference/query-dsl/constant-score-query.asciidoc index dd55e38b3b0..4fc7c1ab7e1 100644 --- a/docs/reference/query-dsl/constant-score-query.asciidoc +++ b/docs/reference/query-dsl/constant-score-query.asciidoc @@ -8,7 +8,7 @@ Wraps a <> and returns every matching document with a <> equal to the `boost` parameter value. -[source,js] +[source,console] ---- GET /_search { @@ -22,7 +22,6 @@ GET /_search } } ---- -// CONSOLE [[constant-score-top-level-params]] ==== Top-level parameters for `constant_score` diff --git a/docs/reference/query-dsl/dis-max-query.asciidoc b/docs/reference/query-dsl/dis-max-query.asciidoc index 57c012802d0..ff6590eb663 100644 --- a/docs/reference/query-dsl/dis-max-query.asciidoc +++ b/docs/reference/query-dsl/dis-max-query.asciidoc @@ -17,7 +17,7 @@ You can use the `dis_max` to search for a term in fields mapped with different [[query-dsl-dis-max-query-ex-request]] ==== Example request -[source,js] +[source,console] ---- GET /_search { @@ -32,7 +32,6 @@ GET /_search } } ---- -// CONSOLE [[query-dsl-dis-max-query-top-level-params]] ==== Top-level parameters for `dis_max` diff --git a/docs/reference/query-dsl/distance-feature-query.asciidoc b/docs/reference/query-dsl/distance-feature-query.asciidoc index 05543fca80f..05ce0873f58 100644 --- a/docs/reference/query-dsl/distance-feature-query.asciidoc +++ b/docs/reference/query-dsl/distance-feature-query.asciidoc @@ -33,7 +33,7 @@ following example. * `production_date`, a <> field * `location`, a <> field -[source,js] +[source,console] ---- PUT /items { @@ -52,14 +52,13 @@ PUT /items } } ---- -// CONSOLE // TESTSETUP -- . Index several documents to this index. 
+ -- -[source,js] +[source,console] ---- PUT /items/_doc/1?refresh { @@ -83,7 +82,6 @@ PUT /items/_doc/3?refresh "location": [-71.3, 41.12] } ---- -// CONSOLE -- @@ -96,7 +94,7 @@ The following `bool` search returns documents with a `name` value of `chocolate`. The search also uses the `distance_feature` query to increase the relevance score of documents with a `production_date` value closer to `now`. -[source,js] +[source,console] ---- GET /items/_search { @@ -118,7 +116,6 @@ GET /items/_search } } ---- -// CONSOLE [[distance-feature-query-distance-ex]] ====== Boost documents based on location @@ -126,7 +123,7 @@ The following `bool` search returns documents with a `name` value of `chocolate`. The search also uses the `distance_feature` query to increase the relevance score of documents with a `location` value closer to `[-71.3, 41.15]`. -[source,js] +[source,console] ---- GET /items/_search { @@ -148,7 +145,6 @@ GET /items/_search } } ---- -// CONSOLE [[distance-feature-top-level-params]] diff --git a/docs/reference/query-dsl/exists-query.asciidoc b/docs/reference/query-dsl/exists-query.asciidoc index e70d95da411..539e7208e00 100644 --- a/docs/reference/query-dsl/exists-query.asciidoc +++ b/docs/reference/query-dsl/exists-query.asciidoc @@ -16,7 +16,7 @@ An indexed value may not exist for a document's field due to a variety of reason [[exists-query-ex-request]] ==== Example request -[source,js] +[source,console] ---- GET /_search { @@ -27,7 +27,6 @@ GET /_search } } ---- -// CONSOLE [[exists-query-top-level-params]] ==== Top-level parameters for `exists` @@ -53,7 +52,7 @@ query. The following search returns documents that are missing an indexed value for the `user` field. -[source,js] +[source,console] ---- GET /_search { @@ -68,4 +67,3 @@ GET /_search } } ---- -// CONSOLE \ No newline at end of file diff --git a/docs/reference/query-dsl/function-score-query.asciidoc b/docs/reference/query-dsl/function-score-query.asciidoc index d5f20a57dc2..12cc2c13971 100644 --- a/docs/reference/query-dsl/function-score-query.asciidoc +++ b/docs/reference/query-dsl/function-score-query.asciidoc @@ -15,7 +15,7 @@ by the query. `function_score` can be used with only one function like this: -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -29,7 +29,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] <1> See <> for a list of supported functions. @@ -38,7 +37,7 @@ Furthermore, several functions can be combined. In this case one can optionally choose to apply the function only if a document matches a given filtering query -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -65,7 +64,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] <1> Boost for the whole query. @@ -135,7 +133,7 @@ the scoring of it optionally with a computation derived from other numeric field values in the doc using a script expression. Here is a simple sample: -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -153,7 +151,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] NOTE: Scores produced by the `script_score` function must be non-negative, @@ -167,7 +164,7 @@ Scripts compilation is cached for faster execution. 
If the script has parameters that it needs to take into account, it is preferable to reuse the same script, and provide parameters to it: -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -189,7 +186,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] Note that unlike the `custom_score` query, the @@ -234,7 +230,7 @@ NOTE: It was possible to set a seed without setting a field, but this has been deprecated as this requires loading fielddata on the `_id` field which consumes a lot of memory. -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -248,7 +244,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] [[function-field-value-factor]] @@ -263,7 +258,7 @@ As an example, imagine you have a document indexed with a numeric `likes` field and wish to influence the score of a document with this field, an example doing so would look like: -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -279,7 +274,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] Which will translate into the following formula for scoring: @@ -375,7 +369,7 @@ this case. If your field is a date field, you can set `scale` and `offset` as days, weeks, and so on. Example: -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -393,8 +387,8 @@ GET /_search } } -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] + <1> The date format of the origin depends on the <> defined in your mapping. If you do not define the origin, the current time is used. <2> The `offset` and `decay` parameters are optional. @@ -573,7 +567,7 @@ and for `location`: Suppose you want to multiply these two functions on the original score, the request would look like this: -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -607,7 +601,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE Next, we show how the computed score looks like for each of the three possible decay functions. diff --git a/docs/reference/query-dsl/fuzzy-query.asciidoc b/docs/reference/query-dsl/fuzzy-query.asciidoc index 06e6095361b..bb20e0bd7e7 100644 --- a/docs/reference/query-dsl/fuzzy-query.asciidoc +++ b/docs/reference/query-dsl/fuzzy-query.asciidoc @@ -25,7 +25,7 @@ The query then returns exact matches for each expansion. [[fuzzy-query-ex-simple]] ===== Simple example -[source,js] +[source,console] ---- GET /_search { @@ -38,12 +38,11 @@ GET /_search } } ---- -// CONSOLE [[fuzzy-query-ex-advanced]] ===== Example using advanced parameters -[source,js] +[source,console] ---- GET /_search { @@ -61,7 +60,6 @@ GET /_search } } ---- -// CONSOLE [[fuzzy-query-top-level-params]] ==== Top-level parameters for `fuzzy` diff --git a/docs/reference/query-dsl/geo-bounding-box-query.asciidoc b/docs/reference/query-dsl/geo-bounding-box-query.asciidoc index b0523e09a3a..a51283ceeb3 100644 --- a/docs/reference/query-dsl/geo-bounding-box-query.asciidoc +++ b/docs/reference/query-dsl/geo-bounding-box-query.asciidoc @@ -7,7 +7,7 @@ A query allowing to filter hits based on a point location using a bounding box. 
Assuming the following indexed document: -[source,js] +[source,console] -------------------------------------------------- PUT /my_locations { @@ -34,13 +34,12 @@ PUT /my_locations/_doc/1 } } -------------------------------------------------- -// CONSOLE // TESTSETUP Then the following simple query can be executed with a `geo_bounding_box` filter: -[source,js] +[source,console] -------------------------------------------------- GET my_locations/_search { @@ -67,7 +66,6 @@ GET my_locations/_search } } -------------------------------------------------- -// CONSOLE [float] ==== Query Options @@ -95,7 +93,7 @@ representations of the geo point, the filter can accept it as well: [float] ===== Lat Lon As Properties -[source,js] +[source,console] -------------------------------------------------- GET my_locations/_search { @@ -122,7 +120,6 @@ GET my_locations/_search } } -------------------------------------------------- -// CONSOLE [float] ===== Lat Lon As Array @@ -130,7 +127,7 @@ GET my_locations/_search Format in `[lon, lat]`, note, the order of lon/lat here in order to conform with http://geojson.org/[GeoJSON]. -[source,js] +[source,console] -------------------------------------------------- GET my_locations/_search { @@ -151,14 +148,13 @@ GET my_locations/_search } } -------------------------------------------------- -// CONSOLE [float] ===== Lat Lon As String Format in `lat,lon`. -[source,js] +[source,console] -------------------------------------------------- GET my_locations/_search { @@ -179,12 +175,11 @@ GET my_locations/_search } } -------------------------------------------------- -// CONSOLE [float] ===== Bounding Box as Well-Known Text (WKT) -[source,js] +[source,console] -------------------------------------------------- GET my_locations/_search { @@ -204,12 +199,11 @@ GET my_locations/_search } } -------------------------------------------------- -// CONSOLE [float] ===== Geohash -[source,js] +[source,console] -------------------------------------------------- GET my_locations/_search { @@ -230,7 +224,6 @@ GET my_locations/_search } } -------------------------------------------------- -// CONSOLE When geohashes are used to specify the bounding the edges of the @@ -244,7 +237,7 @@ In order to specify a bounding box that would match entire area of a geohash the geohash can be specified in both `top_left` and `bottom_right` parameters: -[source,js] +[source,console] -------------------------------------------------- GET my_locations/_search { @@ -258,7 +251,6 @@ GET my_locations/_search } } -------------------------------------------------- -// CONSOLE In this example, the geohash `dr` will produce the bounding box query with the top left corner at `45.0,-78.75` and the bottom right @@ -274,7 +266,7 @@ are supported. Instead of setting the values pairwise, one can use the simple names `top`, `left`, `bottom` and `right` to set the values separately. -[source,js] +[source,console] -------------------------------------------------- GET my_locations/_search { @@ -297,7 +289,6 @@ GET my_locations/_search } } -------------------------------------------------- -// CONSOLE [float] @@ -324,7 +315,7 @@ that the `geo_point` type must have lat and lon indexed in this case). Note, when using the indexed option, multi locations per document field are not supported. 
Here is an example: -[source,js] +[source,console] -------------------------------------------------- GET my_locations/_search { @@ -352,7 +343,6 @@ GET my_locations/_search } } -------------------------------------------------- -// CONSOLE [float] ==== Ignore Unmapped diff --git a/docs/reference/query-dsl/geo-distance-query.asciidoc b/docs/reference/query-dsl/geo-distance-query.asciidoc index 7a7f749687e..576334da131 100644 --- a/docs/reference/query-dsl/geo-distance-query.asciidoc +++ b/docs/reference/query-dsl/geo-distance-query.asciidoc @@ -8,7 +8,7 @@ Filters documents that include only hits that exists within a specific distance from a geo point. Assuming the following mapping and indexed document: -[source,js] +[source,console] -------------------------------------------------- PUT /my_locations { @@ -35,14 +35,13 @@ PUT /my_locations/_doc/1 } } -------------------------------------------------- -// CONSOLE // TESTSETUP Then the following simple query can be executed with a `geo_distance` filter: -[source,js] +[source,console] -------------------------------------------------- GET /my_locations/_search { @@ -64,7 +63,6 @@ GET /my_locations/_search } } -------------------------------------------------- -// CONSOLE [float] ==== Accepted Formats @@ -75,7 +73,7 @@ representations of the geo point, the filter can accept it as well: [float] ===== Lat Lon As Properties -[source,js] +[source,console] -------------------------------------------------- GET /my_locations/_search { @@ -97,7 +95,6 @@ GET /my_locations/_search } } -------------------------------------------------- -// CONSOLE [float] ===== Lat Lon As Array @@ -105,7 +102,7 @@ GET /my_locations/_search Format in `[lon, lat]`, note, the order of lon/lat here in order to conform with http://geojson.org/[GeoJSON]. -[source,js] +[source,console] -------------------------------------------------- GET /my_locations/_search { @@ -124,7 +121,6 @@ GET /my_locations/_search } } -------------------------------------------------- -// CONSOLE [float] @@ -132,7 +128,7 @@ GET /my_locations/_search Format in `lat,lon`. -[source,js] +[source,console] -------------------------------------------------- GET /my_locations/_search { @@ -151,12 +147,11 @@ GET /my_locations/_search } } -------------------------------------------------- -// CONSOLE [float] ===== Geohash -[source,js] +[source,console] -------------------------------------------------- GET /my_locations/_search { @@ -175,7 +170,6 @@ GET /my_locations/_search } } -------------------------------------------------- -// CONSOLE [float] ==== Options diff --git a/docs/reference/query-dsl/geo-polygon-query.asciidoc b/docs/reference/query-dsl/geo-polygon-query.asciidoc index 062e44cf03d..c85462ece18 100644 --- a/docs/reference/query-dsl/geo-polygon-query.asciidoc +++ b/docs/reference/query-dsl/geo-polygon-query.asciidoc @@ -7,7 +7,7 @@ A query returning hits that only fall within a polygon of points. Here is an example: -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -31,7 +31,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE [float] ==== Query Options @@ -57,7 +56,7 @@ Format as `[lon, lat]` Note: the order of lon/lat here must conform with http://geojson.org/[GeoJSON]. 
-[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -81,14 +80,13 @@ GET /_search } } -------------------------------------------------- -// CONSOLE [float] ===== Lat Lon as String Format in `lat,lon`. -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -112,12 +110,11 @@ GET /_search } } -------------------------------------------------- -// CONSOLE [float] ===== Geohash -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -141,7 +138,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE [float] ==== geo_point Type diff --git a/docs/reference/query-dsl/geo-shape-query.asciidoc b/docs/reference/query-dsl/geo-shape-query.asciidoc index 79395f24e3c..35b56eb28e3 100644 --- a/docs/reference/query-dsl/geo-shape-query.asciidoc +++ b/docs/reference/query-dsl/geo-shape-query.asciidoc @@ -25,7 +25,7 @@ http://www.geojson.org[GeoJSON] to represent shapes. Given the following index: -[source,js] +[source,console] -------------------------------------------------- PUT /example { @@ -47,13 +47,12 @@ POST /example/_doc?refresh } } -------------------------------------------------- -// CONSOLE // TESTSETUP The following query will find the point using the Elasticsearch's `envelope` GeoJSON extension: -[source,js] +[source,console] -------------------------------------------------- GET /example/_search { @@ -77,7 +76,6 @@ GET /example/_search } } -------------------------------------------------- -// CONSOLE ==== Pre-Indexed Shape @@ -98,7 +96,7 @@ Defaults to 'shape'. The following is an example of using the Filter with a pre-indexed shape: -[source,js] +[source,console] -------------------------------------------------- PUT /shapes { @@ -138,7 +136,6 @@ GET /example/_search } } -------------------------------------------------- -// CONSOLE ==== Spatial Relations diff --git a/docs/reference/query-dsl/has-child-query.asciidoc b/docs/reference/query-dsl/has-child-query.asciidoc index 95805dbd2e6..32a27ed2395 100644 --- a/docs/reference/query-dsl/has-child-query.asciidoc +++ b/docs/reference/query-dsl/has-child-query.asciidoc @@ -27,7 +27,7 @@ the `has_child` query, use it as rarely as possible. To use the `has_child` query, your index must include a <> field mapping. For example: -[source,js] +[source,console] ---- PUT /my_index { @@ -44,13 +44,12 @@ PUT /my_index } ---- -// CONSOLE // TESTSETUP [[has-child-query-ex-query]] ===== Example query -[source,js] +[source,console] ---- GET /_search { @@ -67,7 +66,6 @@ GET /_search } } ---- -// CONSOLE [[has-child-top-level-params]] ==== Top-level parameters for `has_child` @@ -139,7 +137,7 @@ If you need to sort returned documents by a field in their child documents, use a `function_score` query and sort by `_score`. For example, the following query sorts returned documents by the `click_count` field of their child documents. -[source,js] +[source,console] ---- GET /_search { @@ -158,4 +156,3 @@ GET /_search } } ---- -// CONSOLE diff --git a/docs/reference/query-dsl/has-parent-query.asciidoc b/docs/reference/query-dsl/has-parent-query.asciidoc index 39cb22fb750..ba31069fad9 100644 --- a/docs/reference/query-dsl/has-parent-query.asciidoc +++ b/docs/reference/query-dsl/has-parent-query.asciidoc @@ -23,7 +23,7 @@ Each `has_parent` query in a search can increase query time significantly. To use the `has_parent` query, your index must include a <> field mapping. 
For example: -[source,js] +[source,console] ---- PUT /my-index { @@ -43,13 +43,12 @@ PUT /my-index } ---- -// CONSOLE // TESTSETUP [[has-parent-query-ex-query]] ===== Example query -[source,js] +[source,console] ---- GET /my-index/_search { @@ -67,7 +66,6 @@ GET /my-index/_search } } ---- -// CONSOLE [[has-parent-top-level-params]] ==== Top-level parameters for `has_parent` @@ -120,7 +118,7 @@ If you need to sort returned documents by a field in their parent documents, use a `function_score` query and sort by `_score`. For example, the following query sorts returned documents by the `view_count` field of their parent documents. -[source,js] +[source,console] ---- GET /_search { @@ -139,4 +137,3 @@ GET /_search } } ---- -// CONSOLE \ No newline at end of file diff --git a/docs/reference/query-dsl/ids-query.asciidoc b/docs/reference/query-dsl/ids-query.asciidoc index feea4890745..d94d0cfc03a 100644 --- a/docs/reference/query-dsl/ids-query.asciidoc +++ b/docs/reference/query-dsl/ids-query.asciidoc @@ -9,7 +9,7 @@ the <> field. ==== Example request -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -20,7 +20,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE [[ids-query-top-level-parameters]] ==== Top-level parameters for `ids` diff --git a/docs/reference/query-dsl/intervals-query.asciidoc b/docs/reference/query-dsl/intervals-query.asciidoc index 18723e87579..e560f52e86e 100644 --- a/docs/reference/query-dsl/intervals-query.asciidoc +++ b/docs/reference/query-dsl/intervals-query.asciidoc @@ -24,7 +24,7 @@ favorite food` immediately followed by `hot water` or `cold porridge` in the This search would match a `my_text` value of `my favorite food is cold porridge` but not `when it's cold my favorite food is porridge`. -[source,js] +[source,console] -------------------------------------------------- POST _search { @@ -56,7 +56,6 @@ POST _search } } -------------------------------------------------- -// CONSOLE [[intervals-top-level-params]] ==== Top-level parameters for `intervals` @@ -273,7 +272,7 @@ The following search includes a `filter` rule. It returns documents that have the words `hot` and `porridge` within 10 positions of each other, without the word `salty` in between: -[source,js] +[source,console] -------------------------------------------------- POST _search { @@ -296,7 +295,6 @@ POST _search } } -------------------------------------------------- -// CONSOLE [[interval-script-filter]] ===== Script filters @@ -305,7 +303,7 @@ You can use a script to filter intervals based on their start position, end position, and internal gap count. The following `filter` script uses the `interval` variable with the `start`, `end`, and `gaps` methods: -[source,js] +[source,console] -------------------------------------------------- POST _search { @@ -325,7 +323,6 @@ POST _search } } -------------------------------------------------- -// CONSOLE [[interval-minimization]] @@ -337,7 +334,7 @@ when using `max_gaps` restrictions or filters. 
For example, take the following query, searching for `salty` contained within the phrase `hot porridge`: -[source,js] +[source,console] -------------------------------------------------- POST _search { @@ -359,7 +356,6 @@ POST _search } } -------------------------------------------------- -// CONSOLE This query does *not* match a document containing the phrase `hot porridge is salty porridge`, because the intervals returned by the match query for `hot @@ -373,7 +369,7 @@ cause surprises when used in combination with `max_gaps`. Consider the following query, searching for `the` immediately followed by `big` or `big bad`, immediately followed by `wolf`: -[source,js] +[source,console] -------------------------------------------------- POST _search { @@ -398,7 +394,6 @@ POST _search } } -------------------------------------------------- -// CONSOLE Counter-intuitively, this query does *not* match the document `the big bad wolf`, because the `any_of` rule in the middle only produces intervals @@ -407,7 +402,7 @@ starting at the same position, and so being minimized away. In these cases, it's better to rewrite the query so that all of the options are explicitly laid out at the top level: -[source,js] +[source,console] -------------------------------------------------- POST _search { @@ -431,4 +426,3 @@ POST _search } } -------------------------------------------------- -// CONSOLE diff --git a/docs/reference/query-dsl/match-all-query.asciidoc b/docs/reference/query-dsl/match-all-query.asciidoc index 31d4f64aef3..8e91c8b74f3 100644 --- a/docs/reference/query-dsl/match-all-query.asciidoc +++ b/docs/reference/query-dsl/match-all-query.asciidoc @@ -7,7 +7,7 @@ The most simple query, which matches all documents, giving them all a `_score` of `1.0`. -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -16,11 +16,10 @@ GET /_search } } -------------------------------------------------- -// CONSOLE The `_score` can be changed with the `boost` parameter: -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -29,7 +28,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE [[query-dsl-match-none-query]] [float] @@ -37,7 +35,7 @@ GET /_search This is the inverse of the `match_all` query, which matches no documents. -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -46,4 +44,3 @@ GET /_search } } -------------------------------------------------- -// CONSOLE diff --git a/docs/reference/query-dsl/match-bool-prefix-query.asciidoc b/docs/reference/query-dsl/match-bool-prefix-query.asciidoc index 36699bed81a..34b1d7c0009 100644 --- a/docs/reference/query-dsl/match-bool-prefix-query.asciidoc +++ b/docs/reference/query-dsl/match-bool-prefix-query.asciidoc @@ -9,7 +9,7 @@ A `match_bool_prefix` query analyzes its input and constructs a is used in a `term` query. The last term is used in a `prefix` query. 
A `match_bool_prefix` query such as -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -20,12 +20,11 @@ GET /_search } } -------------------------------------------------- -// CONSOLE where analysis produces the terms `quick`, `brown`, and `f` is similar to the following `bool` query -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -40,7 +39,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE An important difference between the `match_bool_prefix` query and <> is that the @@ -57,7 +55,7 @@ By default, `match_bool_prefix` queries' input text will be analyzed using the analyzer from the queried field's mapping. A different search analyzer can be configured with the `analyzer` parameter -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -71,7 +69,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE `match_bool_prefix` queries support the <> and `operator` diff --git a/docs/reference/query-dsl/match-phrase-prefix-query.asciidoc b/docs/reference/query-dsl/match-phrase-prefix-query.asciidoc index b6272cb441d..031dcaa5874 100644 --- a/docs/reference/query-dsl/match-phrase-prefix-query.asciidoc +++ b/docs/reference/query-dsl/match-phrase-prefix-query.asciidoc @@ -18,7 +18,7 @@ The following search returns documents that contain phrases beginning with This search would match a `message` value of `quick brown fox` or `two quick brown ferrets` but not `the fox is quick and brown`. -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -31,7 +31,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE [[match-phrase-prefix-top-level-params]] diff --git a/docs/reference/query-dsl/match-phrase-query.asciidoc b/docs/reference/query-dsl/match-phrase-query.asciidoc index ed847c419af..0baf3ebbe19 100644 --- a/docs/reference/query-dsl/match-phrase-query.asciidoc +++ b/docs/reference/query-dsl/match-phrase-query.asciidoc @@ -7,7 +7,7 @@ The `match_phrase` query analyzes the text and creates a `phrase` query out of the analyzed text. For example: -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -18,7 +18,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE A phrase query matches terms up to a configurable `slop` (which defaults to 0) in any order. Transposed terms have a slop of 2. @@ -27,7 +26,7 @@ The `analyzer` can be set to control which analyzer will perform the analysis process on the text. It defaults to the field explicit mapping definition, or the default search analyzer, for example: -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -41,6 +40,5 @@ GET /_search } } -------------------------------------------------- -// CONSOLE This query also accepts `zero_terms_query`, as explained in <>. diff --git a/docs/reference/query-dsl/match-query.asciidoc b/docs/reference/query-dsl/match-query.asciidoc index 575ce7c3cf9..5ceba4e61d9 100644 --- a/docs/reference/query-dsl/match-query.asciidoc +++ b/docs/reference/query-dsl/match-query.asciidoc @@ -14,7 +14,7 @@ including options for fuzzy matching. 
[[match-query-ex-request]] ==== Example request -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -27,7 +27,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE [[match-top-level-params]] @@ -147,7 +146,7 @@ See <> for an example. You can simplify the match query syntax by combining the `` and `query` parameters. For example: -[source,js] +[source,console] ---- GET /_search { @@ -158,7 +157,6 @@ GET /_search } } ---- -// CONSOLE [[query-dsl-match-query-boolean]] ===== How the match query works @@ -173,7 +171,7 @@ parameter. Here is an example with the `operator` parameter: -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -187,7 +185,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE The `analyzer` can be set to control which analyzer will perform the analysis process on the text. It defaults to the field explicit mapping @@ -218,7 +215,7 @@ analysis process produces multiple tokens at the same position. Under the hood these terms are expanded to a special synonym query that blends term frequencies, which does not support fuzzy expansion. -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -232,7 +229,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE [[query-dsl-match-query-zero]] ===== Zero terms query @@ -241,7 +237,7 @@ does, the default behavior is to match no documents at all. In order to change that the `zero_terms_query` option can be used, which accepts `none` (default) and `all` which corresponds to a `match_all` query. -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -256,7 +252,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE [[query-dsl-match-query-cutoff]] ===== Cutoff frequency @@ -284,7 +279,7 @@ documents if in the range `[0..1)` or absolute if greater or equal to Here is an example showing a query composed of stopwords exclusively: -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -298,7 +293,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE // TEST[warning:Deprecated field [cutoff_frequency] used, replaced by [you can omit this option, the [match] query can skip block of documents efficiently if the total number of hits is not tracked]] IMPORTANT: The `cutoff_frequency` option operates on a per-shard-level. 
This means @@ -316,7 +310,7 @@ For example, the following synonym: `"ny, new york" would produce:` It is also possible to match multi terms synonyms with conjunctions instead: -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -330,7 +324,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE The example above creates a boolean query: diff --git a/docs/reference/query-dsl/minimum-should-match.asciidoc b/docs/reference/query-dsl/minimum-should-match.asciidoc index e0610ca4240..fc0479265fc 100644 --- a/docs/reference/query-dsl/minimum-should-match.asciidoc +++ b/docs/reference/query-dsl/minimum-should-match.asciidoc @@ -1,7 +1,7 @@ [[query-dsl-minimum-should-match]] == `minimum_should_match` parameter -The `minimum_should_match` parameter possible values: +The `minimum_should_match` parameter's possible values: [cols="<,<,<",options="header",] |======================================================================= diff --git a/docs/reference/query-dsl/mlt-query.asciidoc b/docs/reference/query-dsl/mlt-query.asciidoc index 1d9de562083..d8a2063bdf2 100644 --- a/docs/reference/query-dsl/mlt-query.asciidoc +++ b/docs/reference/query-dsl/mlt-query.asciidoc @@ -15,7 +15,7 @@ provided piece of text. Here, we are asking for all movies that have some text similar to "Once upon a time" in their "title" and in their "description" fields, limiting the number of selected terms to 12. -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -29,13 +29,12 @@ GET /_search } } -------------------------------------------------- -// CONSOLE A more complicated use case consists of mixing texts with documents already existing in the index. In this case, the syntax to specify a document is similar to the one used in the <>. -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -59,13 +58,12 @@ GET /_search } } -------------------------------------------------- -// CONSOLE Finally, users can mix some texts, a chosen set of documents but also provide documents not necessarily present in the index. To provide documents not present in the index, the syntax is similar to <>. -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -94,7 +92,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE ==== How it Works @@ -120,7 +117,7 @@ we can explicitly store their `term_vector` at index time. We can still perform MLT on the "description" and "tags" fields, as `_source` is enabled by default, but there will be no speed up on analysis for these fields. -[source,js] +[source,console] -------------------------------------------------- PUT /imdb { @@ -147,7 +144,6 @@ PUT /imdb } } -------------------------------------------------- -// CONSOLE ==== Parameters diff --git a/docs/reference/query-dsl/multi-match-query.asciidoc b/docs/reference/query-dsl/multi-match-query.asciidoc index 13a238d9048..aeec7326693 100644 --- a/docs/reference/query-dsl/multi-match-query.asciidoc +++ b/docs/reference/query-dsl/multi-match-query.asciidoc @@ -7,7 +7,7 @@ The `multi_match` query builds on the <> to allow multi-field queries: -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -19,7 +19,7 @@ GET /_search } } -------------------------------------------------- -// CONSOLE + <1> The query string. <2> The fields to be queried. 
@@ -29,7 +29,7 @@ GET /_search Fields can be specified with wildcards, eg: -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -41,12 +41,12 @@ GET /_search } } -------------------------------------------------- -// CONSOLE + <1> Query the `title`, `first_name` and `last_name` fields. Individual fields can be boosted with the caret (`^`) notation: -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -58,7 +58,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE <1> The `subject` field is three times as important as the `message` field. @@ -110,7 +109,7 @@ The `best_fields` type generates a <> for each field and wraps them in a <> query, to find the single best matching field. For instance, this query: -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -124,11 +123,10 @@ GET /_search } } -------------------------------------------------- -// CONSOLE would be executed as: -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -143,7 +141,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE Normally the `best_fields` type uses the score of the *single* best matching field, but if `tie_breaker` is specified, then it calculates the score as @@ -169,7 +166,7 @@ which is probably not what you want. Take this query for example: -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -183,7 +180,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE <1> All terms must be present. @@ -212,7 +208,7 @@ to push the most similar results to the top of the list. This query: -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -225,11 +221,10 @@ GET /_search } } -------------------------------------------------- -// CONSOLE would be executed as: -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -244,7 +239,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE The score from each `match` clause is added together, then divided by the number of `match` clauses. @@ -262,7 +256,8 @@ but they use a `match_phrase` or `match_phrase_prefix` query instead of a `match` query. This query: -[source,js] + +[source,console] -------------------------------------------------- GET /_search { @@ -275,11 +270,10 @@ GET /_search } } -------------------------------------------------- -// CONSOLE would be executed as: -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -293,7 +287,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE Also, accepts `analyzer`, <>, `lenient` and `zero_terms_query` as explained in <>, as well as `slop` which is explained in <>. @@ -346,7 +339,7 @@ big field. 
A query like: -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -360,7 +353,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE is executed as: @@ -407,7 +399,7 @@ For instance, if we have a `first` and `last` field which have the same analyzer, plus a `first.edge` and `last.edge` which both use an `edge_ngram` analyzer, this query: -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -423,7 +415,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE would be executed as: @@ -446,7 +437,7 @@ You can easily rewrite this query yourself as two separate `cross_fields` queries combined with a `bool` query, and apply the `minimum_should_match` parameter to just one of them: -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -473,7 +464,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE <1> Either `will` or `smith` must be present in either of the `first` or `last` fields @@ -481,7 +471,7 @@ GET /_search You can force all fields into the same group by specifying the `analyzer` parameter in the query. -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -495,7 +485,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE <1> Use the `standard` analyzer for all fields. @@ -534,7 +523,7 @@ The `bool_prefix` type's scoring behaves like <>, but using a <> instead of a `match` query. -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -547,7 +536,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE The `analyzer`, `boost`, `operator`, `minimum_should_match`, `lenient`, `zero_terms_query`, and `auto_generate_synonyms_phrase_query` parameters as diff --git a/docs/reference/query-dsl/nested-query.asciidoc b/docs/reference/query-dsl/nested-query.asciidoc index a35d3ed3fac..619fe8c3dda 100644 --- a/docs/reference/query-dsl/nested-query.asciidoc +++ b/docs/reference/query-dsl/nested-query.asciidoc @@ -19,7 +19,7 @@ the root parent document. To use the `nested` query, your index must include a <> field mapping. For example: -[source,js] +[source,console] ---- PUT /my_index { @@ -33,13 +33,12 @@ PUT /my_index } ---- -// CONSOLE // TESTSETUP [[nested-query-ex-query]] ===== Example query -[source,js] +[source,console] ---- GET /my_index/_search { @@ -59,7 +58,6 @@ GET /my_index/_search } } ---- -// CONSOLE [[nested-top-level-params]] ==== Top-level parameters for `nested` diff --git a/docs/reference/query-dsl/parent-id-query.asciidoc b/docs/reference/query-dsl/parent-id-query.asciidoc index 3add028c5c1..7248fe74b56 100644 --- a/docs/reference/query-dsl/parent-id-query.asciidoc +++ b/docs/reference/query-dsl/parent-id-query.asciidoc @@ -20,7 +20,7 @@ the following example. . Create an index with a <> field mapping. + -- -[source,js] +[source,console] ---- PUT /my-index { @@ -37,14 +37,13 @@ PUT /my-index } ---- -// CONSOLE // TESTSETUP -- . Index a parent document with an ID of `1`. + -- -[source,js] +[source,console] ---- PUT /my-index/_doc/1?refresh { @@ -52,13 +51,12 @@ PUT /my-index/_doc/1?refresh "my-join-field": "my-parent" } ---- -// CONSOLE -- . Index a child document of the parent document. 
+ -- -[source,js] +[source,console] ---- PUT /my-index/_doc/2?routing=1&refresh { @@ -69,7 +67,6 @@ PUT /my-index/_doc/2?routing=1&refresh } } ---- -// CONSOLE -- [[parent-id-query-ex-query]] @@ -78,7 +75,7 @@ PUT /my-index/_doc/2?routing=1&refresh The following search returns child documents for a parent document with an ID of `1`. -[source,js] +[source,console] ---- GET /my-index/_search { @@ -90,7 +87,6 @@ GET /my-index/_search } } ---- -// CONSOLE [[parent-id-top-level-params]] ==== Top-level parameters for `parent_id` diff --git a/docs/reference/query-dsl/percolate-query.asciidoc b/docs/reference/query-dsl/percolate-query.asciidoc index 192b80b4735..a1b2cf7ff5d 100644 --- a/docs/reference/query-dsl/percolate-query.asciidoc +++ b/docs/reference/query-dsl/percolate-query.asciidoc @@ -14,7 +14,7 @@ to match with the stored queries. Create an index with two fields: -[source,js] +[source,console] -------------------------------------------------- PUT /my-index { @@ -30,7 +30,6 @@ PUT /my-index } } -------------------------------------------------- -// CONSOLE The `message` field is the field used to preprocess the document defined in the `percolator` query before it gets indexed into a temporary index. @@ -43,7 +42,7 @@ used later on to match documents defined on the `percolate` query. Register a query in the percolator: -[source,js] +[source,console] -------------------------------------------------- PUT /my-index/_doc/1?refresh { @@ -54,12 +53,11 @@ PUT /my-index/_doc/1?refresh } } -------------------------------------------------- -// CONSOLE // TEST[continued] Match a document to the registered percolator queries: -[source,js] +[source,console] -------------------------------------------------- GET /my-index/_search { @@ -73,7 +71,6 @@ GET /my-index/_search } } -------------------------------------------------- -// CONSOLE // TEST[continued] The above request will yield the following response: @@ -158,7 +155,7 @@ In that case the `document` parameter can be substituted with the following para In case you are not interested in the score, better performance can be expected by wrapping the percolator query in a `bool` query's filter clause or in a `constant_score` query: -[source,js] +[source,console] -------------------------------------------------- GET /my-index/_search { @@ -176,7 +173,6 @@ GET /my-index/_search } } -------------------------------------------------- -// CONSOLE // TEST[continued] At index time terms are extracted from the percolator query and the percolator @@ -199,7 +195,7 @@ The `_percolator_document_slot` field that is being returned with each matched p multiple documents simultaneously. It indicates which documents matched with a particular percolator query. The numbers correlate with the slot in the `documents` array specified in the `percolate` query. -[source,js] +[source,console] -------------------------------------------------- GET /my-index/_search { @@ -224,7 +220,6 @@ GET /my-index/_search } } -------------------------------------------------- -// CONSOLE // TEST[continued] <1> The documents array contains 4 documents that are going to be percolated at the same time. @@ -286,14 +281,13 @@ Based on the previous example. 
Index the document we want to percolate: -[source,js] +[source,console] -------------------------------------------------- PUT /my-index/_doc/2 { "message" : "A new bonsai tree in the office" } -------------------------------------------------- -// CONSOLE // TEST[continued] Index response: @@ -317,7 +311,7 @@ Index response: Percolating an existing document, using the index response as basis to build to new search request: -[source,js] +[source,console] -------------------------------------------------- GET /my-index/_search { @@ -331,7 +325,6 @@ GET /my-index/_search } } -------------------------------------------------- -// CONSOLE // TEST[continued] <1> The version is optional, but useful in certain cases. We can ensure that we are trying to percolate @@ -354,7 +347,7 @@ This example is based on the mapping of the first example. Save a query: -[source,js] +[source,console] -------------------------------------------------- PUT /my-index/_doc/3?refresh { @@ -365,12 +358,11 @@ PUT /my-index/_doc/3?refresh } } -------------------------------------------------- -// CONSOLE // TEST[continued] Save another query: -[source,js] +[source,console] -------------------------------------------------- PUT /my-index/_doc/4?refresh { @@ -381,12 +373,11 @@ PUT /my-index/_doc/4?refresh } } -------------------------------------------------- -// CONSOLE // TEST[continued] Execute a search request with the `percolate` query and highlighting enabled: -[source,js] +[source,console] -------------------------------------------------- GET /my-index/_search { @@ -405,7 +396,6 @@ GET /my-index/_search } } -------------------------------------------------- -// CONSOLE // TEST[continued] This will yield the following response. @@ -483,7 +473,7 @@ the document defined in the `percolate` query. When percolating multiple documents at the same time like the request below then the highlight response is different: -[source,js] +[source,console] -------------------------------------------------- GET /my-index/_search { @@ -513,7 +503,6 @@ GET /my-index/_search } } -------------------------------------------------- -// CONSOLE // TEST[continued] The slightly different response: @@ -577,7 +566,7 @@ The slightly different response: It is possible to specify multiple `percolate` queries in a single search request: -[source,js] +[source,console] -------------------------------------------------- GET /my-index/_search { @@ -607,7 +596,6 @@ GET /my-index/_search } } -------------------------------------------------- -// CONSOLE // TEST[continued] <1> The `name` parameter will be used to identify which percolator document slots belong to what `percolate` query. @@ -683,7 +671,7 @@ or the unsupported query is the only query in the percolator document). These q can be found by running the following search: -[source,js] +[source,console] --------------------------------------------------- GET /_search { @@ -694,7 +682,6 @@ GET /_search } } --------------------------------------------------- -// CONSOLE NOTE: The above example assumes that there is a `query` field of type `percolator` in the mappings. diff --git a/docs/reference/query-dsl/pinned-query.asciidoc b/docs/reference/query-dsl/pinned-query.asciidoc index 9768a1aa817..a5ae18d2961 100644 --- a/docs/reference/query-dsl/pinned-query.asciidoc +++ b/docs/reference/query-dsl/pinned-query.asciidoc @@ -10,7 +10,7 @@ the <> field. 
==== Example request -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -26,7 +26,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE [[pinned-query-top-level-parameters]] ==== Top-level parameters for `pinned` diff --git a/docs/reference/query-dsl/prefix-query.asciidoc b/docs/reference/query-dsl/prefix-query.asciidoc index 25cf0fc5bf8..780de433aab 100644 --- a/docs/reference/query-dsl/prefix-query.asciidoc +++ b/docs/reference/query-dsl/prefix-query.asciidoc @@ -12,7 +12,7 @@ Returns documents that contain a specific prefix in a provided field. The following search returns documents where the `user` field contains a term that begins with `ki`. -[source,js] +[source,console] ---- GET /_search { @@ -25,7 +25,6 @@ GET /_search } } ---- -// CONSOLE [[prefix-query-top-level-params]] ==== Top-level parameters for `prefix` @@ -50,7 +49,7 @@ information, see the <>. You can simplify the `prefix` query syntax by combining the `` and `value` parameters. For example: -[source,js] +[source,console] ---- GET /_search { @@ -59,7 +58,6 @@ GET /_search } } ---- -// CONSOLE [[prefix-query-index-prefixes]] ===== Speed up prefix queries diff --git a/docs/reference/query-dsl/query-string-query.asciidoc b/docs/reference/query-dsl/query-string-query.asciidoc index cced4f30eeb..56eb3b6efb5 100644 --- a/docs/reference/query-dsl/query-string-query.asciidoc +++ b/docs/reference/query-dsl/query-string-query.asciidoc @@ -38,7 +38,7 @@ city) OR (big apple)` into two parts: `new york city` and `big apple`. The before returning matching documents. Because the query syntax does not use whitespace as an operator, `new york city` is passed as-is to the analyzer. -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -50,7 +50,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE [[query-string-top-level-params]] ==== Top-level parameters for `query_string` @@ -252,7 +251,7 @@ field1:query_term OR field2:query_term | ... For example, the following query -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -264,12 +263,11 @@ GET /_search } } -------------------------------------------------- -// CONSOLE matches the same words as -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -280,13 +278,12 @@ GET /_search } } -------------------------------------------------- -// CONSOLE Since several queries are generated from the individual search terms, combining them is automatically done using a `dis_max` query with a `tie_breaker`. For example (the `name` is boosted by 5 using `^5` notation): -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -299,14 +296,13 @@ GET /_search } } -------------------------------------------------- -// CONSOLE Simple wildcard can also be used to search "within" specific inner elements of the document. 
For example, if we have a `city` object with several fields (or inner object with fields) in it, we can automatically search on all "city" fields: -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -318,13 +314,12 @@ GET /_search } } -------------------------------------------------- -// CONSOLE Another option is to provide the wildcard fields search in the query string itself (properly escaping the `*` sign), for example: `city.\*:something`: -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -335,7 +330,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE NOTE: Since `\` (backslash) is a special character in json strings, it needs to be escaped, hence the two backslashes in the above `query_string`. @@ -344,7 +338,7 @@ The fields parameter can also include pattern based field names, allowing to automatically expand to the relevant fields (dynamically introduced fields included). For example: -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -356,7 +350,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE [[query-string-multi-field-parms]] ====== Additional parameters for multiple field searches @@ -411,7 +404,7 @@ For example, the following synonym: `ny, new york` would produce: It is also possible to match multi terms synonyms with conjunctions instead: -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -424,7 +417,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE The example above creates a boolean query: @@ -440,7 +432,7 @@ The `query_string` splits the query around each operator to create a boolean query for the entire input. You can use `minimum_should_match` to control how many "should" clauses in the resulting query should match. -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -455,7 +447,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE The example above creates a boolean query: @@ -467,7 +458,7 @@ in the single field `title`. [[query-string-min-should-match-multi]] ===== How `minimum_should_match` works for multiple fields -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -483,7 +474,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE The example above creates a boolean query: @@ -492,7 +482,7 @@ The example above creates a boolean query: that matches documents with the disjunction max over the fields `title` and `content`. Here the `minimum_should_match` parameter can't be applied. -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -508,7 +498,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE Adding explicit operators forces each term to be considered as a separate clause. @@ -525,7 +514,7 @@ them made of the disjunction max over the fields for each term. A `cross_fields` value in the `type` field indicates fields with the same analyzer are grouped together when the input is analyzed. 
-[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -542,7 +531,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE The example above creates a boolean query: diff --git a/docs/reference/query-dsl/query_filter_context.asciidoc b/docs/reference/query-dsl/query_filter_context.asciidoc index dd2b3cfd478..9a6b728ea74 100644 --- a/docs/reference/query-dsl/query_filter_context.asciidoc +++ b/docs/reference/query-dsl/query_filter_context.asciidoc @@ -58,7 +58,7 @@ conditions are met: * The `status` field contains the exact word `published`. * The `publish_date` field contains a date from 1 Jan 2015 onwards. -[source,js] +[source,console] ------------------------------------ GET /_search { @@ -76,7 +76,7 @@ GET /_search } } ------------------------------------ -// CONSOLE + <1> The `query` parameter indicates query context. <2> The `bool` and two `match` clauses are used in query context, which means that they are used to score how well each document diff --git a/docs/reference/query-dsl/range-query.asciidoc b/docs/reference/query-dsl/range-query.asciidoc index d8518afda57..52972fe4b38 100644 --- a/docs/reference/query-dsl/range-query.asciidoc +++ b/docs/reference/query-dsl/range-query.asciidoc @@ -12,7 +12,7 @@ Returns documents that contain terms within a provided range. The following search returns documents where the `age` field contains a term between `10` and `20`. -[source,js] +[source,console] ---- GET _search { @@ -27,7 +27,6 @@ GET _search } } ---- -// CONSOLE [[range-query-top-level-params]] ==== Top-level parameters for `range` @@ -149,7 +148,7 @@ When the `` parameter is a <> field datatype, you can use For example, the following search returns documents where the `timestamp` field contains a date between today and yesterday. -[source,js] +[source,console] ---- GET _search { @@ -163,7 +162,6 @@ GET _search } } ---- -// CONSOLE [[range-query-date-math-rounding]] @@ -175,7 +173,7 @@ GET _search -- Rounds up to the lastest millisecond. -For example, `2014-11-18||/M` rounds up to `2014-11-30T23:59:59.999`, including +For example, `2014-11-18||/M` rounds up to `2014-11-30T23:59:59.999`, excluding the entire month. -- @@ -184,7 +182,7 @@ the entire month. -- Rounds down to the first millisecond. -For example, `2014-11-18||/M` rounds down to `2014-11-01`, excluding +For example, `2014-11-18||/M` rounds down to `2014-11-01`, including the entire month. -- @@ -212,7 +210,7 @@ the entire month. You can use the `time_zone` parameter to convert `date` values to UTC using a UTC offset. For example: -[source,js] +[source,console] ---- GET _search { @@ -227,8 +225,8 @@ GET _search } } ---- -// CONSOLE + <1> Indicates that `date` values use a UTC offset of `+01:00`. <2> With a UTC offset of `+01:00`, {es} converts this date to `2014-12-31T23:00:00 UTC`. -<3> The `time_zone` parameter does not affect the `now` value. \ No newline at end of file +<3> The `time_zone` parameter does not affect the `now` value. diff --git a/docs/reference/query-dsl/rank-feature-query.asciidoc b/docs/reference/query-dsl/rank-feature-query.asciidoc index 9a132e3e5d3..079145793bc 100644 --- a/docs/reference/query-dsl/rank-feature-query.asciidoc +++ b/docs/reference/query-dsl/rank-feature-query.asciidoc @@ -53,7 +53,7 @@ to relevance, indicated by a `positive_score_impact` value of `false`. 
- `topics`, a <> field which contains a list of topics and a measure of how well each document is connected to this topic -[source,js] +[source,console] ---- PUT /test { @@ -73,13 +73,12 @@ PUT /test } } ---- -// CONSOLE // TESTSETUP Index several documents to the `test` index. -[source,js] +[source,console] ---- PUT /test/_doc/1?refresh { @@ -118,7 +117,6 @@ PUT /test/_doc/3?refresh } } ---- -// CONSOLE [[rank-feature-query-ex-query]] ===== Example query @@ -126,7 +124,7 @@ PUT /test/_doc/3?refresh The following query searches for `2016` and boosts relevance scores based or `pagerank`, `url_length`, and the `sports` topic. -[source,js] +[source,console] ---- GET /test/_search { @@ -162,7 +160,6 @@ GET /test/_search } } ---- -// CONSOLE [[rank-feature-top-level-params]] @@ -232,7 +229,7 @@ than `0.5` otherwise. Scores are always `(0,1)`. If the rank feature has a negative score impact then the function will be computed as `pivot / (S + pivot)`, which decreases when `S` increases. -[source,js] +[source,console] -------------------------------------------------- GET /test/_search { @@ -246,14 +243,13 @@ GET /test/_search } } -------------------------------------------------- -// CONSOLE If a `pivot` value is not provided, {es} computes a default value equal to the approximate geometric mean of all rank feature values in the index. We recommend using this default value if you haven't had the opportunity to train a good pivot value. -[source,js] +[source,console] -------------------------------------------------- GET /test/_search { @@ -265,7 +261,6 @@ GET /test/_search } } -------------------------------------------------- -// CONSOLE [[rank-feature-query-logarithm]] ===== Logarithm @@ -275,7 +270,7 @@ scaling factor. Scores are unbounded. This function only supports rank features that have a positive score impact. -[source,js] +[source,console] -------------------------------------------------- GET /test/_search { @@ -289,7 +284,6 @@ GET /test/_search } } -------------------------------------------------- -// CONSOLE [[rank-feature-query-sigmoid]] ===== Sigmoid @@ -302,7 +296,7 @@ The `exponent` must be positive and is typically in `[0.5, 1]`. A good value should be computed via training. If you don't have the opportunity to do so, we recommend you use the `saturation` function instead. -[source,js] +[source,console] -------------------------------------------------- GET /test/_search { @@ -317,4 +311,3 @@ GET /test/_search } } -------------------------------------------------- -// CONSOLE \ No newline at end of file diff --git a/docs/reference/query-dsl/regexp-query.asciidoc b/docs/reference/query-dsl/regexp-query.asciidoc index 1feed72d45b..e92424afbc2 100644 --- a/docs/reference/query-dsl/regexp-query.asciidoc +++ b/docs/reference/query-dsl/regexp-query.asciidoc @@ -19,7 +19,7 @@ that begins with `k` and ends with `y`. The `.*` operators match any characters of any length, including no characters. Matching terms can include `ky`, `kay`, and `kimchy`. -[source,js] +[source,console] ---- GET /_search { @@ -35,7 +35,6 @@ GET /_search } } ---- -// CONSOLE [[regexp-top-level-params]] diff --git a/docs/reference/query-dsl/script-query.asciidoc b/docs/reference/query-dsl/script-query.asciidoc index e8c349ea81d..5ed6d4e91f6 100644 --- a/docs/reference/query-dsl/script-query.asciidoc +++ b/docs/reference/query-dsl/script-query.asciidoc @@ -11,7 +11,7 @@ Filters documents based on a provided <>. 
The [[script-query-ex-request]] ==== Example request -[source,js] +[source,console] ---- GET /_search { @@ -29,7 +29,6 @@ GET /_search } } ---- -// CONSOLE [[script-top-level-params]] @@ -49,7 +48,7 @@ Like <>, scripts are cached for faster execution. If you frequently change the arguments of a script, we recommend you store them in the script's `params` parameter. For example: -[source,js] +[source,console] ---- GET /_search { @@ -70,4 +69,3 @@ GET /_search } } ---- -// CONSOLE \ No newline at end of file diff --git a/docs/reference/query-dsl/script-score-query.asciidoc b/docs/reference/query-dsl/script-score-query.asciidoc index fce58709f15..53abfc90bc1 100644 --- a/docs/reference/query-dsl/script-score-query.asciidoc +++ b/docs/reference/query-dsl/script-score-query.asciidoc @@ -14,7 +14,7 @@ The `script_score` query is useful if, for example, a scoring function is expens ==== Example request The following `script_score` query assigns each returned document a score equal to the `likes` field value divided by `10`. -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -30,7 +30,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE [[script-score-top-level-params]] diff --git a/docs/reference/query-dsl/shape-query.asciidoc b/docs/reference/query-dsl/shape-query.asciidoc index d9073078015..6a1c7380976 100644 --- a/docs/reference/query-dsl/shape-query.asciidoc +++ b/docs/reference/query-dsl/shape-query.asciidoc @@ -24,7 +24,7 @@ https://en.wikipedia.org/wiki/Well-known_text_representation_of_geometry[Well Kn Given the following index: -[source,js] +[source,console] -------------------------------------------------- PUT /example { @@ -46,13 +46,12 @@ POST /example/_doc?refresh } } -------------------------------------------------- -// CONSOLE // TESTSETUP The following query will find the point using the Elasticsearch's `envelope` GeoJSON extension: -[source,js] +[source,console] -------------------------------------------------- GET /example/_search { @@ -69,7 +68,6 @@ GET /example/_search } } -------------------------------------------------- -// CONSOLE ==== Pre-Indexed Shape @@ -90,7 +88,7 @@ Defaults to 'shape'. The following is an example of using the Filter with a pre-indexed shape: -[source,js] +[source,console] -------------------------------------------------- PUT /shapes { @@ -126,7 +124,6 @@ GET /example/_search } } -------------------------------------------------- -// CONSOLE ==== Spatial Relations diff --git a/docs/reference/query-dsl/simple-query-string-query.asciidoc b/docs/reference/query-dsl/simple-query-string-query.asciidoc index 44f811007a6..6659a9d860e 100644 --- a/docs/reference/query-dsl/simple-query-string-query.asciidoc +++ b/docs/reference/query-dsl/simple-query-string-query.asciidoc @@ -20,7 +20,7 @@ parts of the query string. [[simple-query-string-query-ex-request]] ==== Example request -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -33,7 +33,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE [[simple-query-string-top-level-params]] @@ -153,7 +152,7 @@ To use one of these characters literally, escape it with a preceding backslash The behavior of these operators may differ depending on the `default_operator` value. 
For example: -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -165,7 +164,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE This search is intended to only return documents containing `foo` or `bar` that also do **not** contain `baz`. However because of a `default_operator` of `OR`, @@ -182,7 +180,7 @@ To explicitly enable only specific operators, use a `|` separator. For example, a `flags` value of `OR|AND|PREFIX` disables all operators except `OR`, `AND`, and `PREFIX`. -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -194,7 +192,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE [[supported-flags-values]] ====== Valid values @@ -247,7 +244,7 @@ Enables whitespace as split characters. Fields can be specified with wildcards, eg: -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -259,12 +256,12 @@ GET /_search } } -------------------------------------------------- -// CONSOLE + <1> Query the `title`, `first_name` and `last_name` fields. Individual fields can be boosted with the caret (`^`) notation: -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -276,7 +273,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE <1> The `subject` field is three times as important as the `message` field. @@ -291,7 +287,7 @@ For example, the following synonym: `"ny, new york" would produce:` It is also possible to match multi terms synonyms with conjunctions instead: -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -303,7 +299,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE The example above creates a boolean query: diff --git a/docs/reference/query-dsl/span-field-masking-query.asciidoc b/docs/reference/query-dsl/span-field-masking-query.asciidoc index 67ef67e5951..a101c8afc47 100644 --- a/docs/reference/query-dsl/span-field-masking-query.asciidoc +++ b/docs/reference/query-dsl/span-field-masking-query.asciidoc @@ -12,7 +12,7 @@ Span field masking query is invaluable in conjunction with *multi-fields* when s Example: -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -41,6 +41,5 @@ GET /_search } } -------------------------------------------------- -// CONSOLE Note: as span field masking query returns the masked field, scoring will be done using the norms of the field name supplied. This may lead to unexpected scoring behaviour. \ No newline at end of file diff --git a/docs/reference/query-dsl/span-first-query.asciidoc b/docs/reference/query-dsl/span-first-query.asciidoc index a5d23071f79..6db0f28c81d 100644 --- a/docs/reference/query-dsl/span-first-query.asciidoc +++ b/docs/reference/query-dsl/span-first-query.asciidoc @@ -7,7 +7,7 @@ Matches spans near the beginning of a field. The span first query maps to Lucene `SpanFirstQuery`. Here is an example: -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -21,7 +21,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE The `match` clause can be any other span type query. The `end` controls the maximum end position permitted in a match. 
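The span-first hunk just above ends on the note that the `match` clause can be any other span-type query. Purely as an illustrative sketch of that point (Python with the `requests` package, an assumed local cluster on `localhost:9200`, and a hypothetical `user` field), the snippet below nests a `span_multi`-wrapped `prefix` query inside `span_first`, which is the combination the next file in this diff, `span-multi-term-query.asciidoc`, documents:

[source,python]
----
import json

import requests  # assumed to be installed; any HTTP client would do

# span_first limits matches to spans ending at or before "end" (here 3);
# its "match" clause is itself a span_multi wrapping a prefix query.
# Host and field name are hypothetical.
query = {
    "query": {
        "span_first": {
            "match": {
                "span_multi": {
                    "match": {"prefix": {"user": {"value": "ki"}}}
                }
            },
            "end": 3,
        }
    }
}

resp = requests.post(
    "http://localhost:9200/_search",
    headers={"Content-Type": "application/json"},
    data=json.dumps(query),
)
print(resp.json())
----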
diff --git a/docs/reference/query-dsl/span-multi-term-query.asciidoc b/docs/reference/query-dsl/span-multi-term-query.asciidoc index c645d45e237..5c3fef19dd5 100644 --- a/docs/reference/query-dsl/span-multi-term-query.asciidoc +++ b/docs/reference/query-dsl/span-multi-term-query.asciidoc @@ -8,7 +8,7 @@ The `span_multi` query allows you to wrap a `multi term query` (one of wildcard, fuzzy, prefix, range or regexp query) as a `span query`, so it can be nested. Example: -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -21,11 +21,10 @@ GET /_search } } -------------------------------------------------- -// CONSOLE A boost can also be associated with the query: -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -38,7 +37,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE WARNING: `span_multi` queries will hit too many clauses failure if the number of terms that match the query exceeds the boolean query limit (defaults to 1024).To avoid an unbounded expansion you can set the <> `long` field. This field contains the number of matching terms required to return a document. -[source,js] +[source,console] ---- PUT /job-candidates { @@ -64,7 +64,6 @@ PUT /job-candidates } } ---- -// CONSOLE // TESTSETUP -- @@ -82,7 +81,7 @@ PUT /job-candidates Include the `?refresh` parameter so the document is immediately available for search. -[source,js] +[source,console] ---- PUT /job-candidates/_doc/1?refresh { @@ -91,7 +90,6 @@ PUT /job-candidates/_doc/1?refresh "required_matches": 2 } ---- -// CONSOLE -- @@ -105,7 +103,7 @@ PUT /job-candidates/_doc/1?refresh * `2` in the `required_matches` field. -[source,js] +[source,console] ---- PUT /job-candidates/_doc/2?refresh { @@ -114,7 +112,6 @@ PUT /job-candidates/_doc/2?refresh "required_matches": 2 } ---- -// CONSOLE -- @@ -135,7 +132,7 @@ The `minimum_should_match_field` is `required_matches`. This means the number of matching terms required is `2`, the value of the `required_matches` field. -[source,js] +[source,console] ---- GET /job-candidates/_search { @@ -149,7 +146,6 @@ GET /job-candidates/_search } } ---- -// CONSOLE [[terms-set-top-level-params]] ==== Top-level parameters for `terms_set` @@ -214,7 +210,7 @@ number of terms provided in the `terms` field. * The required number of terms to match is `2`, the value of the `required_matches` field. -[source,js] +[source,console] ---- GET /job-candidates/_search { @@ -231,4 +227,3 @@ GET /job-candidates/_search } } ---- -// CONSOLE \ No newline at end of file diff --git a/docs/reference/query-dsl/type-query.asciidoc b/docs/reference/query-dsl/type-query.asciidoc index 4364d1e14e9..82272968f91 100644 --- a/docs/reference/query-dsl/type-query.asciidoc +++ b/docs/reference/query-dsl/type-query.asciidoc @@ -5,7 +5,7 @@ deprecated[7.0.0,Types and the `type` query are deprecated and in the process of Filters documents matching the provided document / mapping type. 
-[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -16,4 +16,3 @@ GET /_search } } -------------------------------------------------- -// CONSOLE diff --git a/docs/reference/query-dsl/wildcard-query.asciidoc b/docs/reference/query-dsl/wildcard-query.asciidoc index 95e52352a76..5cc1dacfb6e 100644 --- a/docs/reference/query-dsl/wildcard-query.asciidoc +++ b/docs/reference/query-dsl/wildcard-query.asciidoc @@ -17,7 +17,7 @@ The following search returns documents where the `user` field contains a term that begins with `ki` and ends with `y`. These matching terms can include `kiy`, `kity`, or `kimchy`. -[source,js] +[source,console] ---- GET /_search { @@ -32,7 +32,6 @@ GET /_search } } ---- -// CONSOLE [[wildcard-top-level-params]] ==== Top-level parameters for `wildcard` diff --git a/docs/reference/query-dsl/wrapper-query.asciidoc b/docs/reference/query-dsl/wrapper-query.asciidoc index 010e086056d..02c594a41f6 100644 --- a/docs/reference/query-dsl/wrapper-query.asciidoc +++ b/docs/reference/query-dsl/wrapper-query.asciidoc @@ -6,7 +6,7 @@ A query that accepts any other query as base64 encoded string. -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -17,7 +17,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE <1> Base64 encoded string: `{"term" : { "user" : "Kimchy" }}` diff --git a/docs/reference/redirects.asciidoc b/docs/reference/redirects.asciidoc index a2153c09407..8b459a8f098 100644 --- a/docs/reference/redirects.asciidoc +++ b/docs/reference/redirects.asciidoc @@ -413,7 +413,7 @@ GET _search move the query and filter to the `must` and `filter` parameters in the `bool` query: -[source,js] +[source,console] ------------------------- GET _search { @@ -433,7 +433,6 @@ GET _search } } ------------------------- -// CONSOLE [role="exclude",id="query-dsl-or-query"] === Or query @@ -816,4 +815,73 @@ See <>. [role="exclude",id="ccs-reduction"] === {ccs-cap} reduction -See <>. \ No newline at end of file +See <>. + +[role="exclude",id="administer-elasticsearch"] +=== Administering {es} +See <>. + +[role="exclude",id="delete-data-frame-transform"] +=== Delete {transforms} API + +See <>. + +[role="exclude",id="get-data-frame-transform-stats"] +=== Get {transform} statistics API + +See <>. + +[role="exclude",id="get-data-frame-transform"] +=== Get {transforms} API + +See <>. + +[role="exclude",id="preview-data-frame-transform"] +=== Preview {transforms} API + +See <>. + +[role="exclude",id="put-data-frame-transform"] +=== Create {transforms} API + +See <>. + +[role="exclude",id="start-data-frame-transform"] +=== Start {transforms} API + +See <>. + +[role="exclude",id="stop-data-frame-transform"] +=== Stop {transforms} API + +See <>. + +[role="exclude",id="update-data-frame-transform"] +=== Update {transforms} API + +See <>. + +[role="exclude",id="data-frame-apis"] +=== {transform-cap} APIs + +See <>. + +[role="exclude",id="data-frame-transform-resource"] +=== {transform-cap} resources + +See <>. + +[role="exclude",id="data-frame-transform-dest"] +=== Dest objects + +See <>. + +[role="exclude",id="data-frame-transform-source"] +==== Source objects + +See <>. + +[role="exclude",id="data-frame-transform-pivot"] +==== Pivot objects + +See <>. 
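The `wrapper` query converted in `wrapper-query.asciidoc` above takes its inner query as a base64-encoded JSON string. Purely as an illustration (standard-library Python only, not part of the documentation change itself), this is one way such a string could be produced for the `{"term": {"user": "Kimchy"}}` example shown on that page:

[source,python]
----
import base64
import json

# The wrapper query carries another query as base64-encoded JSON.
# Encode the term query used in the wrapper-query example above.
inner = {"term": {"user": "Kimchy"}}
encoded = base64.b64encode(json.dumps(inner).encode("utf-8")).decode("ascii")

wrapper_query = {"query": {"wrapper": {"query": encoded}}}
print(json.dumps(wrapper_query, indent=2))
----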
diff --git a/docs/reference/release-notes.asciidoc b/docs/reference/release-notes.asciidoc index 215f06b2c4f..3ffb7cbe29a 100644 --- a/docs/reference/release-notes.asciidoc +++ b/docs/reference/release-notes.asciidoc @@ -7,6 +7,7 @@ This section summarizes the changes in each release. * <> +* <> * <> * <> * <> diff --git a/docs/reference/release-notes/7.2.asciidoc b/docs/reference/release-notes/7.2.asciidoc index 711ff9287d3..6f6c12ed65f 100644 --- a/docs/reference/release-notes/7.2.asciidoc +++ b/docs/reference/release-notes/7.2.asciidoc @@ -236,7 +236,6 @@ Docs Infrastructure:: * Docs: Simplifying setup by using module configuration variant syntax {pull}40879[#40879] Engine:: -* Use reader attributes to control term dict memory useage {pull}42838[#42838] (issue: {issue}38390[#38390]) * Simplify initialization of max_seq_no of updates {pull}41161[#41161] (issues: {issue}33842[#33842], {issue}40249[#40249]) * Adjust init map size of user data of index commit {pull}40965[#40965] * Don't mark shard as refreshPending on stats fetching {pull}40458[#40458] (issues: {issue}33835[#33835], {issue}33847[#33847]) diff --git a/docs/reference/release-notes/7.3.asciidoc b/docs/reference/release-notes/7.3.asciidoc index 9a70f9e0499..975f183514d 100644 --- a/docs/reference/release-notes/7.3.asciidoc +++ b/docs/reference/release-notes/7.3.asciidoc @@ -1,3 +1,37 @@ +[[release-notes-7.3.2]] +== {es} version 7.3.2 + +Also see <>. + +[[bug-7.3.2]] +[float] +=== Bug fixes + +Data Frame:: +* Fix off-by-one error in checkpoint operations_behind {pull}46235[#46235] + +Distributed:: +* Update translog checkpoint after marking operations as persisted {pull}45634[#45634] (issue: {issue}29161[#29161]) + +Engine:: +* Handle delete document level failures {pull}46100[#46100] (issue: {issue}46083[#46083]) +* Handle no-op document level failures {pull}46083[#46083] + +Infra/Scripting:: +* Fix bugs in Painless SCatch node {pull}45880[#45880] + +Machine learning:: +* Throw an error when a datafeed needs {ccs} but it is not enabled for the node {pull}46044[#46044] + +SQL:: +* SQL: Fix issue with IIF function when condition folds {pull}46290[#46290] (issue: {issue}46268[#46268]) +* SQL: Fix issue with DataType for CASE with NULL {pull}46173[#46173] (issue: {issue}46032[#46032]) + +Search:: +* Multi-get requests should wait for search active {pull}46283[#46283] (issue: {issue}27500[#27500]) +* Ensure top docs optimization is fully disabled for queries with unbounded max scores. 
{pull}46105[#46105] (issue: {issue}45933[#45933]) + + [[release-notes-7.3.1]] == {es} version 7.3.1 @@ -228,6 +262,7 @@ Distributed:: * Improve Close Index Response {pull}39687[#39687] (issue: {issue}33888[#33888]) Engine:: +* Use reader attributes to control term dict memory useage {pull}42838[#42838] (issue: {issue}38390[#38390]) * Remove sort by primary term when reading soft-deletes {pull}43845[#43845] * Refresh translog stats after translog trimming in NoOpEngine {pull}43825[#43825] (issue: {issue}43156[#43156]) * Expose translog stats in ReadOnlyEngine {pull}43752[#43752] diff --git a/docs/reference/release-notes/7.4.asciidoc b/docs/reference/release-notes/7.4.asciidoc index 010b23b447d..1d8fd00950a 100644 --- a/docs/reference/release-notes/7.4.asciidoc +++ b/docs/reference/release-notes/7.4.asciidoc @@ -35,7 +35,6 @@ Snapshot/Restore:: === Breaking Java changes Geo:: -* Geo: Change order of parameter in Geometries to lon, lat 7.x {pull}45618[#45618] (issues: {issue}45048[#45048], {issue}45332[#45332]) * Geo: Change order of parameter in Geometries to lon, lat {pull}45332[#45332] (issue: {issue}45048[#45048]) Network:: @@ -93,6 +92,7 @@ Geo:: * [SPATIAL] New ShapeQueryBuilder for querying indexed cartesian geometry {pull}45108[#45108] (issue: {issue}44980[#44980]) * [GEO] New ShapeFieldMapper for indexing cartesian geometries {pull}44980[#44980] * Add Circle Processor {pull}43851[#43851] (issue: {issue}43554[#43554]) +* New `shape` field type for indexing Cartesian Geometries {pull}43644[#43644] Machine Learning:: * Allow the user to specify 'query' in Evaluate Data Frame request {pull}45775[#45775] (issue: {issue}45729[#45729]) @@ -167,6 +167,7 @@ Features/Features:: * Disable String interning on field names for JSON parsing {pull}41039[#41039] (issue: {issue}39890[#39890]) Features/ILM:: +* Add node setting for disabling SLM {pull}46794[#46794] (issue: {issue}38461[#38461]) * Include in-progress snapshot for a policy with get SLM policy API {pull}45245[#45245] * Add option to filter ILM explain response {pull}44777[#44777] (issue: {issue}44189[#44189]) * Expose index age in ILM explain output {pull}44457[#44457] (issue: {issue}38988[#38988]) @@ -201,7 +202,6 @@ Geo:: Infra/Core:: * Add OCI annotations and adjust existing annotations {pull}45167[#45167] (issues: {issue}45162[#45162], {issue}45166[#45166]) * Use the full hash in build info {pull}45163[#45163] (issue: {issue}45162[#45162]) -* Set start of the week to Monday for root locale {pull}43652[#43652] (issues: {issue}41670[#41670], {issue}42588[#42588], {issue}43275[#43275]) Infra/Packaging:: * Remove redundant Java check from Sys V init {pull}45793[#45793] (issue: {issue}45593[#45593]) @@ -216,6 +216,7 @@ Infra/Scripting:: * Remove stale permissions from untrusted policy {pull}44783[#44783] Infra/Settings:: +* Add more meaningful keystore version mismatch errors {pull}46291[#46291] (issue: {issue}44624[#44624]) * Lift the restrictions that uppercase is not allowed in Setting Name. 
{pull}45222[#45222] (issue: {issue}43835[#43835]) * Normalize environment paths {pull}45179[#45179] (issue: {issue}45176[#45176]) @@ -242,7 +243,6 @@ Network:: * Move ConnectionManager to async APIs {pull}42636[#42636] Ranking:: -* Precompute vector length on indexing {pull}45390[#45390] * Search enhancement: pinned queries {pull}44345[#44345] (issue: {issue}44074[#44074]) * Fix parameter value for calling data.advanceExact {pull}44205[#44205] * Add l1norm and l2norm distances for vectors {pull}44116[#44116] (issue: {issue}37947[#37947]) @@ -255,6 +255,7 @@ Recovery:: * Make peer recovery send file chunks async {pull}44468[#44468] (issues: {issue}36195[#36195], {issue}44040[#44040]) SQL:: +* SQL: Support queries with HAVING over SELECT {pull}46709[#46709] (issue: {issue}37051[#37051]) * SQL: Break TextFormatter/Cursor dependency {pull}45613[#45613] (issue: {issue}45516[#45516]) * SQL: remove deprecated use of "interval" from date_histogram usage {pull}45501[#45501] (issue: {issue}43922[#43922]) * SQL: use hasValue() methods from Elasticsearch's InspectionHelper classes {pull}44745[#44745] (issue: {issue}35745[#35745]) @@ -300,12 +301,15 @@ Analysis:: * Fix AnalyzeAction response serialization {pull}44284[#44284] (issue: {issue}44078[#44078]) Authentication:: +* Fallback to realm authc if ApiKey fails {pull}46538[#46538] +* Enforce realm name uniqueness {pull}46253[#46253] * Allow empty token endpoint for implicit flow {pull}45038[#45038] * Do not use scroll when finding duplicate API key {pull}45026[#45026] * Fix broken short-circuit in getUnlicensedRealms {pull}44399[#44399] * Fix X509AuthenticationToken principal {pull}43932[#43932] (issues: {issue}34396[#34396], {issue}43796[#43796]) Authorization:: +* Give kibana user privileges to create APM agent config index {pull}46765[#46765] (issue: {issue}45610[#45610]) * Add `manage_own_api_key` cluster privilege {pull}45696[#45696] (issue: {issue}40031[#40031]) * Sparse role queries can throw an NPE {pull}45053[#45053] @@ -316,6 +320,7 @@ CCR:: * Avoid NPE when checking for CCR index privileges {pull}44397[#44397] (issue: {issue}44172[#44172]) CRUD:: +* Ignore replication for noop updates {pull}46458[#46458] (issues: {issue}41065[#41065], {issue}44603[#44603], {issue}46366[#46366]) * Allow _update on write alias {pull}45318[#45318] (issue: {issue}31520[#31520]) * Do not allow version in Rest Update API {pull}43516[#43516] (issue: {issue}42497[#42497]) @@ -323,6 +328,8 @@ Cluster Coordination:: * Avoid counting votes from master-ineligible nodes {pull}43688[#43688] Data Frame:: +* [ML][Transform] Use field_caps API for mapping deduction {pull}46703[#46703] (issue: {issue}46694[#46694]) +* [ML-DataFrame] Fix off-by-one error in checkpoint operations_behind {pull}46235[#46235] * [ML][Data Frame] moves failure state transition for MT safety {pull}45676[#45676] (issue: {issue}45664[#45664]) * [ML][Data Frame] fixing _start?force=true bug {pull}45660[#45660] * [ML][Data frame] fixing failure state transitions and race condition {pull}45627[#45627] (issues: {issue}45562[#45562], {issue}45609[#45609]) @@ -337,6 +344,8 @@ Data Frame:: * [ML-DataFrame] audit message missing for autostop {pull}43984[#43984] (issue: {issue}43977[#43977]) Distributed:: +* Fix false positive out of sync warning in synced-flush {pull}46576[#46576] (issues: {issue}28464[#28464], {issue}30244[#30244]) +* Suppress warning logs from background sync on relocated primary {pull}46247[#46247] (issues: {issue}40800[#40800], {issue}42241[#42241]) * Ensure AsyncTask#isScheduled remain 
false after close {pull}45687[#45687] (issue: {issue}45576[#45576]) * Update translog checkpoint after marking operations as persisted {pull}45634[#45634] (issue: {issue}29161[#29161]) * Fix clock used in update requests {pull}45262[#45262] (issue: {issue}45254[#45254]) @@ -351,6 +360,9 @@ Engine:: * Fail engine if hit document failure on replicas {pull}43523[#43523] (issues: {issue}40435[#40435], {issue}43228[#43228]) Features/ILM:: +* Handle partial failure retrieving segments in SegmentCountStep {pull}46556[#46556] +* Fixes for API specification {pull}46522[#46522] +* Fix SnapshotLifecycleMetadata xcontent serialization {pull}46500[#46500] (issue: {issue}46499[#46499]) * Updated slm API spec parameters and URL {pull}44797[#44797] * Fix swapped variables in error message {pull}44300[#44300] @@ -360,11 +372,18 @@ Features/Indices APIs:: * Check shard limit after applying index templates {pull}44619[#44619] (issues: {issue}34021[#34021], {issue}44567[#44567], {issue}44619[#44619]) * Validate index settings after applying templates {pull}44612[#44612] (issues: {issue}34021[#34021], {issue}44567[#44567]) +Features/Ingest:: +* Allow dropping documents with auto-generated ID {pull}46773[#46773] (issue: {issue}46678[#46678]) + Features/Java High Level REST Client:: +* HLRC multisearchTemplate forgot params {pull}46492[#46492] (issue: {issue}46488[#46488]) +* terminateAfter added to the RequestConverter {pull}46474[#46474] (issue: {issue}46446[#46446]) * [Closes #44045] Added 'slices' parameter when submitting reindex request via Java high level REST client {pull}45690[#45690] (issue: {issue}44045[#44045]) * HLRC: Fix '+' Not Correctly Encoded in GET Req. {pull}33164[#33164] (issue: {issue}33077[#33077]) Features/Watcher:: +* Fix class used to initialize logger in Watcher {pull}46467[#46467] +* Fix wrong URL encoding in watcher HTTP client {pull}45894[#45894] (issue: {issue}44970[#44970]) * Fix watcher HttpClient URL creation {pull}45207[#45207] (issue: {issue}44970[#44970]) * Log write failures for watcher history document. 
{pull}44129[#44129] @@ -372,6 +391,9 @@ Geo:: * Geo: fix geo query decomposition {pull}44924[#44924] (issue: {issue}44891[#44891]) * Geo: add validator that only checks altitude {pull}43893[#43893] +Highlighting:: +* Fix highlighting for script_score query {pull}46507[#46507] (issue: {issue}46471[#46471]) + Infra/Core:: * Always check that cgroup data is present {pull}45606[#45606] (issue: {issue}45396[#45396]) * Safe publication of DelayedAllocationService and SnapshotShardsService {pull}45517[#45517] (issue: {issue}38560[#38560]) @@ -431,13 +453,18 @@ Recovery:: * Never release store using CancellableThreads {pull}45409[#45409] (issues: {issue}45136[#45136], {issue}45237[#45237]) * Remove leniency in reset engine from translog {pull}44711[#44711] -Reindex:: -* Fix issues with serializing BulkByScrollResponse {pull}45357[#45357] - Rollup:: * Fix Rollup job creation to work with templates {pull}43943[#43943] SQL:: +* SQL: Properly handle indices with no/empty mapping {pull}46775[#46775] (issue: {issue}46757[#46757]) +* SQL: improve ResultSet behavior when no rows are available {pull}46753[#46753] (issue: {issue}46750[#46750]) +* SQL: use the correct data type for types conversion {pull}46574[#46574] (issue: {issue}46090[#46090]) +* SQL: Fix issue with common type resolution {pull}46565[#46565] (issue: {issue}46551[#46551]) +* SQL: fix scripting for grouped by datetime functions {pull}46421[#46421] (issue: {issue}40241[#40241]) +* SQL: Use null schema response {pull}46386[#46386] (issue: {issue}46381[#46381]) +* SQL: Fix issue with IIF function when condition folds {pull}46290[#46290] (issue: {issue}46268[#46268]) +* SQL: Fix issue with DataType for CASE with NULL {pull}46173[#46173] (issue: {issue}46032[#46032]) * SQL: adds format parameter to range queries for constant date comparisons {pull}45326[#45326] (issue: {issue}45139[#45139]) * SQL: uniquely named inner_hits sections for each nested field condition {pull}45039[#45039] (issues: {issue}33080[#33080], {issue}44544[#44544]) * SQL: fix URI path being lost in case of hosted ES scenario {pull}44776[#44776] (issue: {issue}44721[#44721]) @@ -447,6 +474,7 @@ SQL:: * SQL: handle SQL not being available in a more graceful way {pull}43665[#43665] (issue: {issue}41279[#41279]) Search:: +* Multi-get requests should wait for search active {pull}46283[#46283] (issue: {issue}27500[#27500]) * Ensure top docs optimization is fully disabled for queries with unbounded max scores. 
{pull}46105[#46105] (issue: {issue}45933[#45933]) * Disallow partial results when shard unavailable {pull}45739[#45739] (issue: {issue}42612[#42612]) * Prevent Leaking Search Tasks on Exceptions in FetchSearchPhase and DfsQueryPhase {pull}45500[#45500] @@ -454,6 +482,7 @@ Search:: * Don't use index_phrases on graph queries {pull}44340[#44340] (issue: {issue}43976[#43976]) Security:: +* Initialize document subset bit set cache used for DLS {pull}46211[#46211] (issue: {issue}45147[#45147]) * Fix .security-* indices auto-create {pull}44918[#44918] * SecurityIndexManager handle RuntimeException while reading mapping {pull}44409[#44409] * Do not swallow I/O exception getting authentication {pull}44398[#44398] (issues: {issue}44172[#44172], {issue}44397[#44397]) @@ -471,6 +500,15 @@ Task Management:: +[[regression-7.4.0]] +[float] +=== Regressions + +Aggregations:: +* Implement rounding optimization for fixed offset timezones {pull}46670[#46670] (issue: {issue}45702[#45702]) + + + [[upgrade-7.4.0]] [float] === Upgrades @@ -486,6 +524,3 @@ Network:: Search:: * Upgrade to lucene snapshot 8.3.0-snapshot-8dd116a6158 {pull}45604[#45604] (issue: {issue}43976[#43976]) - - - diff --git a/docs/reference/rest-api/common-parms.asciidoc b/docs/reference/rest-api/common-parms.asciidoc index ad9ef32f8ca..b4033dbda0a 100644 --- a/docs/reference/rest-api/common-parms.asciidoc +++ b/docs/reference/rest-api/common-parms.asciidoc @@ -30,7 +30,7 @@ that point to a missing or closed index. end::allow-no-indices[] tag::analyzer[] -analyzer`:: +`analyzer`:: (Optional, string) Analyzer to use for the query string. end::analyzer[] @@ -202,6 +202,12 @@ tag::if_seq_no[] sequence number. See <>. end::if_seq_no[] +tag::ignore_throttled[] +`ignore_throttled`:: +(Optional, boolean) If `true`, concrete, expanded or aliased indices are +ignored when throttled. +end::ignore_throttled[] + tag::index-ignore-unavailable[] `ignore_unavailable`:: (Optional, boolean) If `true`, missing or closed indices are not included in the @@ -404,6 +410,12 @@ tag::search-q[] (Optional, string) Query in the Lucene query string syntax. end::search-q[] +tag::query[] +`query`:: +(Optional, <>) Defines the search definition using the +<>. 
+end::query[] + tag::refresh[] `refresh`:: (Optional, enum) If `true`, {es} refreshes the affected shards to make this diff --git a/docs/reference/rest-api/defs.asciidoc b/docs/reference/rest-api/defs.asciidoc index efeb6c60923..75e496903f5 100644 --- a/docs/reference/rest-api/defs.asciidoc +++ b/docs/reference/rest-api/defs.asciidoc @@ -8,20 +8,20 @@ These resource definitions are used in APIs related to {ml-features} and * <> * <> * <> -* <> * <> * <> * <> * <> * <> * <> +* <> include::{es-repo-dir}/ml/anomaly-detection/apis/datafeedresource.asciidoc[] include::{es-repo-dir}/ml/df-analytics/apis/dfanalyticsresources.asciidoc[] -include::{es-repo-dir}/data-frames/apis/transformresource.asciidoc[] include::{es-repo-dir}/ml/df-analytics/apis/evaluateresources.asciidoc[] include::{es-repo-dir}/ml/anomaly-detection/apis/jobresource.asciidoc[] include::{es-repo-dir}/ml/anomaly-detection/apis/jobcounts.asciidoc[] include::{es-repo-dir}/ml/anomaly-detection/apis/snapshotresource.asciidoc[] include::{xes-repo-dir}/rest-api/security/role-mapping-resources.asciidoc[] include::{es-repo-dir}/ml/anomaly-detection/apis/resultsresource.asciidoc[] +include::{es-repo-dir}/transform/apis/transformresource.asciidoc[] diff --git a/docs/reference/rest-api/index.asciidoc b/docs/reference/rest-api/index.asciidoc index 7bb821d729c..7250f146608 100644 --- a/docs/reference/rest-api/index.asciidoc +++ b/docs/reference/rest-api/index.asciidoc @@ -14,7 +14,6 @@ not be included yet. * <> * <> * <> -* <> * <> * <> * <> @@ -31,6 +30,7 @@ not be included yet. * <> * <> * <> +* <> * <> -- @@ -38,7 +38,6 @@ include::{es-repo-dir}/api-conventions.asciidoc[] include::{es-repo-dir}/cat.asciidoc[] include::{es-repo-dir}/cluster.asciidoc[] include::{es-repo-dir}/ccr/apis/ccr-apis.asciidoc[] -include::{es-repo-dir}/data-frames/apis/index.asciidoc[] include::{es-repo-dir}/docs.asciidoc[] include::{es-repo-dir}/ingest/apis/enrich/index.asciidoc[] include::{es-repo-dir}/graph/explore.asciidoc[] @@ -55,5 +54,6 @@ include::{es-repo-dir}/rollup/rollup-api.asciidoc[] include::{es-repo-dir}/search.asciidoc[] include::{xes-repo-dir}/rest-api/security.asciidoc[] include::{es-repo-dir}/ilm/apis/slm-api.asciidoc[] +include::{es-repo-dir}/transform/apis/index.asciidoc[] include::{xes-repo-dir}/rest-api/watcher.asciidoc[] include::defs.asciidoc[] diff --git a/docs/reference/rest-api/info.asciidoc b/docs/reference/rest-api/info.asciidoc index 3de442285d8..9aae2c7331e 100644 --- a/docs/reference/rest-api/info.asciidoc +++ b/docs/reference/rest-api/info.asciidoc @@ -41,11 +41,10 @@ The information provided by this API includes: The following example queries the info API: -[source,js] +[source,console] ------------------------------------------------------------ GET /_xpack ------------------------------------------------------------ -// CONSOLE Example response: @@ -68,10 +67,6 @@ Example response: "available" : true, "enabled" : true }, - "data_frame" : { - "available" : true, - "enabled" : true - }, "analytics" : { "available" : true, "enabled" : true @@ -124,6 +119,10 @@ Example response: "available" : true, "enabled" : true }, + "transform" : { + "available" : true, + "enabled" : true + }, "vectors" : { "available" : true, "enabled" : true @@ -150,16 +149,14 @@ Example response: The following example only returns the build and features information: -[source,js] +[source,console] ------------------------------------------------------------ GET /_xpack?categories=build,features ------------------------------------------------------------ -// 
CONSOLE The following example removes the descriptions from the response: -[source,js] +[source,console] ------------------------------------------------------------ GET /_xpack?human=false ------------------------------------------------------------ -// CONSOLE diff --git a/docs/reference/rollup/apis/delete-job.asciidoc b/docs/reference/rollup/apis/delete-job.asciidoc index 1a36a146da6..e5babce43b2 100644 --- a/docs/reference/rollup/apis/delete-job.asciidoc +++ b/docs/reference/rollup/apis/delete-job.asciidoc @@ -76,11 +76,10 @@ POST my_rollup_index/_delete_by_query If we have a rollup job named `sensor`, it can be deleted with: -[source,js] +[source,console] -------------------------------------------------- DELETE _rollup/job/sensor -------------------------------------------------- -// CONSOLE // TEST[setup:sensor_rollup_job] Which will return the response: diff --git a/docs/reference/rollup/apis/get-job.asciidoc b/docs/reference/rollup/apis/get-job.asciidoc index 4b3e336e42f..20ed289b929 100644 --- a/docs/reference/rollup/apis/get-job.asciidoc +++ b/docs/reference/rollup/apis/get-job.asciidoc @@ -79,11 +79,10 @@ state is set, the job will remove itself from the cluster. If we have already created a rollup job named `sensor`, the details about the job can be retrieved with: -[source,js] +[source,console] -------------------------------------------------- GET _rollup/job/sensor -------------------------------------------------- -// CONSOLE // TEST[setup:sensor_rollup_job] The API yields the following response: @@ -154,7 +153,7 @@ The API yields the following response: The `jobs` array contains a single job (`id: sensor`) since we requested a single job in the endpoint's URL. If we add another job, we can see how multi-job responses are handled: -[source,js] +[source,console] -------------------------------------------------- PUT _rollup/job/sensor2 <1> { @@ -186,7 +185,6 @@ PUT _rollup/job/sensor2 <1> GET _rollup/job/_all <2> -------------------------------------------------- -// CONSOLE // TEST[setup:sensor_rollup_job] <1> We create a second job with name `sensor2` <2> Then request all jobs by using `_all` in the GetJobs API diff --git a/docs/reference/rollup/apis/put-job.asciidoc b/docs/reference/rollup/apis/put-job.asciidoc index 7a094643687..ed91d40bde5 100644 --- a/docs/reference/rollup/apis/put-job.asciidoc +++ b/docs/reference/rollup/apis/put-job.asciidoc @@ -70,7 +70,7 @@ For more details about the job configuration, see <>. The following example creates a {rollup-job} named "sensor", targeting the "sensor-*" index pattern: -[source,js] +[source,console] -------------------------------------------------- PUT _rollup/job/sensor { @@ -100,7 +100,6 @@ PUT _rollup/job/sensor ] } -------------------------------------------------- -// CONSOLE // TEST[setup:sensor_index] When the job is created, you receive the following results: diff --git a/docs/reference/rollup/apis/rollup-caps.asciidoc b/docs/reference/rollup/apis/rollup-caps.asciidoc index 3d055bf90de..58100513201 100644 --- a/docs/reference/rollup/apis/rollup-caps.asciidoc +++ b/docs/reference/rollup/apis/rollup-caps.asciidoc @@ -51,7 +51,7 @@ Imagine we have an index named `sensor-1` full of raw data. We know that the da will be a `sensor-2`, `sensor-3`, etc. 
Let's create a Rollup job that targets the index pattern `sensor-*` to accommodate this future scaling: -[source,js] +[source,console] -------------------------------------------------- PUT _rollup/job/sensor { @@ -81,16 +81,14 @@ PUT _rollup/job/sensor ] } -------------------------------------------------- -// CONSOLE // TEST[setup:sensor_index] We can then retrieve the rollup capabilities of that index pattern (`sensor-*`) via the following command: -[source,js] +[source,console] -------------------------------------------------- GET _rollup/data/sensor-* -------------------------------------------------- -// CONSOLE // TEST[continued] Which will yield the following response: @@ -155,20 +153,18 @@ configurations available. We could also retrieve the same information with a request to `_all`: -[source,js] +[source,console] -------------------------------------------------- GET _rollup/data/_all -------------------------------------------------- -// CONSOLE // TEST[continued] But note that if we use the concrete index name (`sensor-1`), we'll retrieve no rollup capabilities: -[source,js] +[source,console] -------------------------------------------------- GET _rollup/data/sensor-1 -------------------------------------------------- -// CONSOLE // TEST[continued] [source,console-result] diff --git a/docs/reference/rollup/apis/rollup-index-caps.asciidoc b/docs/reference/rollup/apis/rollup-index-caps.asciidoc index 60776d8a9fd..8b475c8aa5e 100644 --- a/docs/reference/rollup/apis/rollup-index-caps.asciidoc +++ b/docs/reference/rollup/apis/rollup-index-caps.asciidoc @@ -42,7 +42,7 @@ For more information, see Imagine we have an index named `sensor-1` full of raw data. We know that the data will grow over time, so there will be a `sensor-2`, `sensor-3`, etc. Let's create a Rollup job, which stores it's data in `sensor_rollup`: -[source,js] +[source,console] -------------------------------------------------- PUT _rollup/job/sensor { @@ -72,17 +72,15 @@ PUT _rollup/job/sensor ] } -------------------------------------------------- -// CONSOLE // TEST[setup:sensor_index] If at a later date, we'd like to determine what jobs and capabilities were stored in the `sensor_rollup` index, we can use the Get Rollup Index API: -[source,js] +[source,console] -------------------------------------------------- GET /sensor_rollup/_rollup/data -------------------------------------------------- -// CONSOLE // TEST[continued] Note how we are requesting the concrete rollup index name (`sensor_rollup`) as the first part of the URL. @@ -150,10 +148,9 @@ configurations available. Like other APIs that interact with indices, you can specify index patterns instead of explicit indices: -[source,js] +[source,console] -------------------------------------------------- GET /*_rollup/_rollup/data -------------------------------------------------- -// CONSOLE // TEST[continued] diff --git a/docs/reference/rollup/apis/rollup-job-config.asciidoc b/docs/reference/rollup/apis/rollup-job-config.asciidoc index 8277834d5e4..e0ca2b73320 100644 --- a/docs/reference/rollup/apis/rollup-job-config.asciidoc +++ b/docs/reference/rollup/apis/rollup-job-config.asciidoc @@ -13,7 +13,7 @@ should be grouped on, and what metrics to collect for each group. 
A full job configuration might look like this: -[source,js] +[source,console] -------------------------------------------------- PUT _rollup/job/sensor { @@ -47,7 +47,6 @@ PUT _rollup/job/sensor ] } -------------------------------------------------- -// CONSOLE // TEST[setup:sensor_index] ==== Logistical Details diff --git a/docs/reference/rollup/apis/rollup-search.asciidoc b/docs/reference/rollup/apis/rollup-search.asciidoc index 127d06f6df9..d5d43be0929 100644 --- a/docs/reference/rollup/apis/rollup-search.asciidoc +++ b/docs/reference/rollup/apis/rollup-search.asciidoc @@ -51,7 +51,7 @@ omitted entirely. Imagine we have an index named `sensor-1` full of raw data, and we have created a rollup job with the following configuration: -[source,js] +[source,console] -------------------------------------------------- PUT _rollup/job/sensor { @@ -81,14 +81,13 @@ PUT _rollup/job/sensor ] } -------------------------------------------------- -// CONSOLE // TEST[setup:sensor_index] This rolls up the `sensor-*` pattern and stores the results in `sensor_rollup`. To search this rolled up data, we need to use the `_rollup_search` endpoint. However, you'll notice that we can use regular query DSL to search the rolled-up data: -[source,js] +[source,console] -------------------------------------------------- GET /sensor_rollup/_rollup_search { @@ -102,7 +101,6 @@ GET /sensor_rollup/_rollup_search } } -------------------------------------------------- -// CONSOLE // TEST[setup:sensor_prefab_data] // TEST[s/_rollup_search/_rollup_search?filter_path=took,timed_out,terminated_early,_shards,hits,aggregations/] @@ -141,7 +139,7 @@ Rollup searches are limited to functionality that was configured in the rollup j the average temperature because `avg` was not one of the configured metrics for the `temperature` field. If we try to execute that search: -[source,js] +[source,console] -------------------------------------------------- GET sensor_rollup/_rollup_search { @@ -155,7 +153,6 @@ GET sensor_rollup/_rollup_search } } -------------------------------------------------- -// CONSOLE // TEST[continued] // TEST[catch:/illegal_argument_exception/] @@ -185,7 +182,7 @@ The Rollup Search API has the capability to search across both "live", non-rollu data. This is done by simply adding the live indices to the URI: -[source,js] +[source,console] -------------------------------------------------- GET sensor-1,sensor_rollup/_rollup_search <1> { @@ -199,7 +196,6 @@ GET sensor-1,sensor_rollup/_rollup_search <1> } } -------------------------------------------------- -// CONSOLE // TEST[continued] // TEST[s/_rollup_search/_rollup_search?filter_path=took,timed_out,terminated_early,_shards,hits,aggregations/] <1> Note the URI now searches `sensor-1` and `sensor_rollup` at the same time diff --git a/docs/reference/rollup/apis/start-job.asciidoc b/docs/reference/rollup/apis/start-job.asciidoc index 27183df64fc..da31181a016 100644 --- a/docs/reference/rollup/apis/start-job.asciidoc +++ b/docs/reference/rollup/apis/start-job.asciidoc @@ -47,11 +47,10 @@ to start a job that is already started, nothing happens. 
If we have already created a {rollup-job} named `sensor`, it can be started with: -[source,js] +[source,console] -------------------------------------------------- POST _rollup/job/sensor/_start -------------------------------------------------- -// CONSOLE // TEST[setup:sensor_rollup_job] Which will return the response: diff --git a/docs/reference/rollup/apis/stop-job.asciidoc b/docs/reference/rollup/apis/stop-job.asciidoc index 36c963d53fe..a669a6b56c9 100644 --- a/docs/reference/rollup/apis/stop-job.asciidoc +++ b/docs/reference/rollup/apis/stop-job.asciidoc @@ -72,11 +72,10 @@ the indexer has fully stopped. This is accomplished with the `wait_for_completion` query parameter, and optionally a `timeout`: -[source,js] +[source,console] -------------------------------------------------- POST _rollup/job/sensor/_stop?wait_for_completion=true&timeout=10s -------------------------------------------------- -// CONSOLE // TEST[setup:sensor_started_rollup_job] The parameter blocks the API call from returning until either the job has moved diff --git a/docs/reference/rollup/rollup-getting-started.asciidoc b/docs/reference/rollup/rollup-getting-started.asciidoc index 06bca3c9d48..27f9d9cd406 100644 --- a/docs/reference/rollup/rollup-getting-started.asciidoc +++ b/docs/reference/rollup/rollup-getting-started.asciidoc @@ -28,7 +28,7 @@ look like this: We'd like to rollup these documents into hourly summaries, which will allow us to generate reports and dashboards with any time interval one hour or greater. A rollup job might look like this: -[source,js] +[source,console] -------------------------------------------------- PUT _rollup/job/sensor { @@ -57,7 +57,6 @@ PUT _rollup/job/sensor ] } -------------------------------------------------- -// CONSOLE // TEST[setup:sensor_index] We give the job the ID of "sensor" (in the url: `PUT _rollup/job/sensor`), and tell it to rollup the index pattern `"sensor-*"`. @@ -111,11 +110,10 @@ you to stop them later as a way to temporarily pause, without deleting the confi To start the job, execute this command: -[source,js] +[source,console] -------------------------------------------------- POST _rollup/job/sensor/_start -------------------------------------------------- -// CONSOLE // TEST[setup:sensor_rollup_job] [float] @@ -126,7 +124,7 @@ so that you can use the same Query DSL syntax that you are accustomed to... it j For example, take this query: -[source,js] +[source,console] -------------------------------------------------- GET /sensor_rollup/_rollup_search { @@ -140,7 +138,6 @@ GET /sensor_rollup/_rollup_search } } -------------------------------------------------- -// CONSOLE // TEST[setup:sensor_prefab_data] It's a simple aggregation that calculates the maximum of the `temperature` field. 
But you'll notice that is is being sent to the `sensor_rollup` @@ -184,7 +181,7 @@ is nearly identical to normal DSL, making it easy to integrate into dashboards a Finally, we can use those grouping fields we defined to construct a more complicated query: -[source,js] +[source,console] -------------------------------------------------- GET /sensor_rollup/_rollup_search { @@ -218,7 +215,6 @@ GET /sensor_rollup/_rollup_search } } -------------------------------------------------- -// CONSOLE // TEST[setup:sensor_prefab_data] Which returns a corresponding response: diff --git a/docs/reference/rollup/rollup-search-limitations.asciidoc b/docs/reference/rollup/rollup-search-limitations.asciidoc index 5b53cf615e1..d55787f3cec 100644 --- a/docs/reference/rollup/rollup-search-limitations.asciidoc +++ b/docs/reference/rollup/rollup-search-limitations.asciidoc @@ -41,7 +41,7 @@ rollup job to store metrics about the `price` field, you won't be able to use th For example, the `temperature` field in the following query has been stored in a rollup job... but not with an `avg` metric. Which means the usage of `avg` here is not allowed: -[source,js] +[source,console] -------------------------------------------------- GET sensor_rollup/_rollup_search { @@ -55,7 +55,6 @@ GET sensor_rollup/_rollup_search } } -------------------------------------------------- -// CONSOLE // TEST[setup:sensor_prefab_data] // TEST[catch:/illegal_argument_exception/] diff --git a/docs/reference/scripting/engine.asciidoc b/docs/reference/scripting/engine.asciidoc index b4a2cd29fdd..54d85e6e823 100644 --- a/docs/reference/scripting/engine.asciidoc +++ b/docs/reference/scripting/engine.asciidoc @@ -24,7 +24,7 @@ You can execute the script by specifying its `lang` as `expert_scripts`, and the of the script as the script source: -[source,js] +[source,console] -------------------------------------------------- POST /_search { @@ -53,5 +53,4 @@ POST /_search } } -------------------------------------------------- -// CONSOLE // TEST[skip:we don't have an expert script plugin installed to test this] diff --git a/docs/reference/scripting/fields.asciidoc b/docs/reference/scripting/fields.asciidoc index ef8aee98ced..6308fcdf623 100644 --- a/docs/reference/scripting/fields.asciidoc +++ b/docs/reference/scripting/fields.asciidoc @@ -42,7 +42,7 @@ Here's an example of using a script in a <> to alter the relevance `_score` of each document: -[source,js] +[source,console] ------------------------------------- PUT my_index/_doc/1?refresh { @@ -75,7 +75,6 @@ GET my_index/_search } } ------------------------------------- -// CONSOLE [float] @@ -87,7 +86,7 @@ script is to use the `doc['field_name']` syntax, which retrieves the field value from <>. Doc values are a columnar field value store, enabled by default on all fields except for <>. -[source,js] +[source,console] ------------------------------- PUT my_index/_doc/1?refresh { @@ -109,7 +108,6 @@ GET my_index/_search } } ------------------------------- -// CONSOLE Doc-values can only return "simple" field values like numbers, dates, geo- points, terms, etc, or arrays of these values if the field is multi-valued. @@ -170,7 +168,7 @@ doc values. For instance: -[source,js] +[source,console] ------------------------------- PUT my_index { @@ -216,7 +214,7 @@ GET my_index/_search } } ------------------------------- -// CONSOLE + <1> The `title` field is not stored and so cannot be used with the `_fields[]` syntax. <2> The `title` field can still be accessed from the `_source`. 
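The callouts just above close the `_source` example in `scripting/fields.asciidoc`, whose body is hidden by the diff context. A minimal sketch of the kind of request those callouts describe, assuming an index named `my_index` with a `title` field (the `title_from_source` name below is purely illustrative and not part of the patch):

[source,console]
--------------------------------------------------
GET my_index/_search
{
  "script_fields": {
    "title_from_source": { <1>
      "script": {
        "lang": "painless",
        "source": "params['_source']['title']" <2>
      }
    }
  }
}
--------------------------------------------------
<1> Illustrative name for the computed field returned with each hit.
<2> Reads the value out of the parsed `_source`, which is slower than doc values but works for fields that are not stored, such as `title` here.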
diff --git a/docs/reference/scripting/using.asciidoc b/docs/reference/scripting/using.asciidoc index 5060c7fc67d..31b8612ce13 100644 --- a/docs/reference/scripting/using.asciidoc +++ b/docs/reference/scripting/using.asciidoc @@ -20,7 +20,7 @@ the same pattern: For example, the following script is used in a search request to return a <>: -[source,js] +[source,console] ------------------------------------- PUT my_index/_doc/1 { @@ -42,7 +42,6 @@ GET my_index/_search } } ------------------------------------- -// CONSOLE [float] === Script parameters @@ -144,7 +143,7 @@ The following are examples of using a stored script that lives at First, create the script called `calculate-score` in the cluster state: -[source,js] +[source,console] ----------------------------------- POST _scripts/calculate-score { @@ -154,20 +153,18 @@ POST _scripts/calculate-score } } ----------------------------------- -// CONSOLE This same script can be retrieved with: -[source,js] +[source,console] ----------------------------------- GET _scripts/calculate-score ----------------------------------- -// CONSOLE // TEST[continued] Stored scripts can be used by specifying the `id` parameters as follows: -[source,js] +[source,console] -------------------------------------------------- GET _search { @@ -183,16 +180,14 @@ GET _search } } -------------------------------------------------- -// CONSOLE // TEST[continued] And deleted with: -[source,js] +[source,console] ----------------------------------- DELETE _scripts/calculate-score ----------------------------------- -// CONSOLE // TEST[continued] [float] diff --git a/docs/reference/search.asciidoc b/docs/reference/search.asciidoc index 84ff2e45282..e8a8a42da6a 100644 --- a/docs/reference/search.asciidoc +++ b/docs/reference/search.asciidoc @@ -14,22 +14,21 @@ Which shards will be searched on can also be controlled by providing the `routing` parameter. For example, when indexing tweets, the routing value can be the user name: -[source,js] +[source,console] -------------------------------------------------- POST /twitter/_doc?routing=kimchy { "user" : "kimchy", - "postDate" : "2009-11-15T14:12:12", + "post_date" : "2009-11-15T14:12:12", "message" : "trying out Elasticsearch" } -------------------------------------------------- -// CONSOLE In such a case, if we want to search only on the tweets for a specific user, we can specify it as the routing, resulting in the search hitting only the relevant shard: -[source,js] +[source,console] -------------------------------------------------- POST /twitter/_search?routing=kimchy { @@ -47,7 +46,6 @@ POST /twitter/_search?routing=kimchy } } -------------------------------------------------- -// CONSOLE // TEST[continued] The routing parameter can be multi valued represented as a comma @@ -70,7 +68,7 @@ based on a number of criteria: This can be turned off by changing the dynamic cluster setting `cluster.routing.use_adaptive_replica_selection` from `true` to `false`: -[source,js] +[source,console] -------------------------------------------------- PUT /_cluster/settings { @@ -79,7 +77,6 @@ PUT /_cluster/settings } } -------------------------------------------------- -// CONSOLE If adaptive replica selection is turned off, searches are sent to the index/indices shards in a round robin fashion between all copies of the data @@ -95,7 +92,7 @@ statistics aggregation per group. It can later be retrieved using the specifically. 
For example, here is a search body request that associate the request with two different groups: -[source,js] +[source,console] -------------------------------------------------- POST /_search { @@ -105,7 +102,6 @@ POST /_search "stats" : ["group1", "group2"] } -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] [float] @@ -130,13 +126,10 @@ Setting this value to `-1` resets the global search timeout to no timeout. === Search Cancellation Searches can be cancelled using standard <> -mechanism. By default, a running search only checks if it is cancelled or -not on segment boundaries, therefore the cancellation can be delayed by large -segments. The search cancellation responsiveness can be improved by setting -the dynamic cluster-level setting `search.low_level_cancellation` to `true`. -However, it comes with an additional overhead of more frequent cancellation -checks that can be noticeable on large fast running search queries. Changing this -setting only affects the searches that start after the change is made. +mechanism and are also automatically cancelled when the http connection used to +perform the request is closed by the client. It is fundamental that the http +client sending requests closes connections whenever requests time out or are +aborted. [float] [[search-concurrency-and-parallelism]] diff --git a/docs/reference/search/count.asciidoc b/docs/reference/search/count.asciidoc index 331cf70685e..1aa6eb51959 100644 --- a/docs/reference/search/count.asciidoc +++ b/docs/reference/search/count.asciidoc @@ -1,11 +1,90 @@ [[search-count]] === Count API -The count API allows to easily execute a query and get the number of -matches for that query. It can be executed across one or more indices. -The query can either be provided using a simple query string as a -parameter, or using the <> defined within the request -body. Here is an example: +Gets the number of matches for a search query. + +[source,console] +-------------------------------------------------- +GET /twitter/_count?q=user:kimchy +-------------------------------------------------- +// TEST[setup:twitter] + +NOTE: The query being sent in the body must be nested in a `query` key, same as +the <> works. + + +[[search-count-api-request]] +==== {api-request-title} + +`PUT //_count` + + +[[search-count-api-desc]] +==== {api-description-title} + +The count API allows you to execute a query and get the number of matches for +that query. It can be executed across one or more indices. The query can either +be provided using a simple query string as a parameter, or using the +<> defined within the request body. + +The count API can be applied to <>. + +The operation is broadcast across all shards. For each shard id group, a replica +is chosen and executed against it. This means that replicas increase the +scalability of count. + + +[[search-count-api-path-params]] +==== {api-path-parms-title} + +include::{docdir}/rest-api/common-parms.asciidoc[tag=index] + + +[[search-count-api-query-params]] +==== {api-query-parms-title} + +include::{docdir}/rest-api/common-parms.asciidoc[tag=allow-no-indices] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=analyzer] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=analyze_wildcard] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=default_operator] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=df] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=expand-wildcards] ++ +Defaults to `open`. 
+ +include::{docdir}/rest-api/common-parms.asciidoc[tag=ignore_throttled] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=index-ignore-unavailable] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=lenient] + +`min_score`:: +(Optional, float) + Sets the minimum `_score` value that documents must have to be included in the + result. + +include::{docdir}/rest-api/common-parms.asciidoc[tag=preference] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=search-q] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=routing] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=terminate_after] + + +[[search-count-request-body]] +==== {api-request-body-title} + +include::{docdir}/rest-api/common-parms.asciidoc[tag=query] + + +[[search-count-api-example]] +==== {api-examples-title} [source,console] -------------------------------------------------- @@ -24,11 +103,8 @@ GET /twitter/_count } -------------------------------------------------- -NOTE: The query being sent in the body must be nested in a `query` key, same as -the <> works - -Both examples above do the same thing, which is count the number of -tweets from the `twitter` index for a certain user. The result is: +Both examples above do the same: count the number of tweets from the `twitter` +index for a certain user. The API returns the following response: [source,console-result] -------------------------------------------------- @@ -45,62 +121,3 @@ tweets from the `twitter` index for a certain user. The result is: The query is optional, and when not provided, it will use `match_all` to count all the docs. - -[float] -==== Multi index - -The count API can be applied to <>. - -[float] -==== Request Parameters - -When executing count using the query parameter `q`, the query passed is -a query string using Lucene query parser. There are additional -parameters that can be passed: - -[cols="<,<",options="header",] -|======================================================================= -|Name |Description -|`df` |The default field to use when no field prefix is defined within the -query. - -|`analyzer` |The analyzer name to be used when analyzing the query string. - -|`default_operator` |The default operator to be used, can be `AND` or -`OR`. Defaults to `OR`. - -|`lenient` |If set to true will cause format based failures (like -providing text to a numeric field) to be ignored. Defaults to false. - -|`analyze_wildcard` |Should wildcard and prefix queries be analyzed or -not. Defaults to `false`. - -|`terminate_after` |The maximum count for each shard, upon -reaching which the query execution will terminate early. -If set, the response will have a boolean field `terminated_early` to -indicate whether the query execution has actually terminated_early. -Defaults to no terminate_after. -|======================================================================= - -[float] -==== Request Body - -The count can use the <> within -its body in order to express the query that should be executed. The body -content can also be passed as a REST parameter named `source`. - -Both HTTP GET and HTTP POST can be used to execute count with body. -Since not all clients support GET with body, POST is allowed as well. - -[float] -==== Distributed - -The count operation is broadcast across all shards. For each shard id -group, a replica is chosen and executed against it. This means that -replicas increase the scalability of count. 
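The removed `Routing` prose below is now covered by the `routing` entry in the parameter list above. As a hedged sketch only (reusing the `twitter` index and the `kimchy` routing value that other examples in this patch assume, not an official addition to the page), the documented `routing` and `min_score` parameters can be combined on a count request like this:

[source,console]
--------------------------------------------------
GET /twitter/_count?routing=kimchy&min_score=0.5 <1>
{
  "query" : {
    "match" : { "user" : "kimchy" }
  }
}
--------------------------------------------------
<1> `routing` restricts the count to the shards that routing value maps to; `min_score` excludes matches scoring below `0.5` from the count.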
- -[float] -==== Routing - -The routing value (a comma separated list of the routing values) can be -specified to control which shards the count request will be executed on. diff --git a/docs/reference/search/explain.asciidoc b/docs/reference/search/explain.asciidoc index f1710679805..1b5d280559c 100644 --- a/docs/reference/search/explain.asciidoc +++ b/docs/reference/search/explain.asciidoc @@ -1,18 +1,10 @@ [[search-explain]] === Explain API -The explain api computes a score explanation for a query and a specific -document. This can give useful feedback whether a document matches or -didn't match a specific query. +Returns information about why a specific document matches (or doesn't match) a +query. -Note that a single index must be provided to the `index` parameter. - -[float] -==== Usage - -Full query example: - -[source,js] +[source,console] -------------------------------------------------- GET /twitter/_explain/0 { @@ -21,10 +13,91 @@ GET /twitter/_explain/0 } } -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] -This will yield the following result: + +[[sample-api-request]] +==== {api-request-title} + +`GET //_explain/` +`POST //_explain/` + +[[sample-api-desc]] +==== {api-description-title} + +The explain API computes a score explanation for a query and a specific +document. This can give useful feedback whether a document matches or +didn't match a specific query. + + +[[sample-api-path-params]] +==== {api-path-parms-title} + +``:: + (Required, integer) Defines the document ID. + +``:: ++ +-- +(Required, string) +Index names used to limit the request. + +Only a single index name can be provided to this parameter. +-- + + +[[sample-api-query-params]] +==== {api-query-parms-title} + +include::{docdir}/rest-api/common-parms.asciidoc[tag=analyzer] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=analyze_wildcard] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=default_operator] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=df] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=lenient] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=preference] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=search-q] + +`stored_fields`:: + (Optional, string) A comma-separated list of stored fields to return in the + response. + +include::{docdir}/rest-api/common-parms.asciidoc[tag=index-routing] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=source] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=source_excludes] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=source_includes] + + +[[sample-api-request-body]] +==== {api-request-body-title} + +include::{docdir}/rest-api/common-parms.asciidoc[tag=query] + + +[[sample-api-example]] +==== {api-examples-title} + +[source,console] +-------------------------------------------------- +GET /twitter/_explain/0 +{ + "query" : { + "match" : { "message" : "elasticsearch" } + } +} +-------------------------------------------------- +// TEST[setup:twitter] + + +The API returns the following response: [source,console-result] -------------------------------------------------- @@ -100,66 +173,17 @@ This will yield the following result: } -------------------------------------------------- -There is also a simpler way of specifying the query via the `q` -parameter. The specified `q` parameter value is then parsed as if the -`query_string` query was used. 
Example usage of the `q` parameter in the -explain api: -[source,js] +There is also a simpler way of specifying the query via the `q` parameter. The +specified `q` parameter value is then parsed as if the `query_string` query was +used. Example usage of the `q` parameter in the +explain API: + +[source,console] -------------------------------------------------- GET /twitter/_explain/0?q=message:search -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] -This will yield the same result as the previous request. -[float] -==== All parameters: - -[horizontal] -`_source`:: - - Set to `true` to retrieve the `_source` of the document explained. You can also - retrieve part of the document by using `_source_includes` & `_source_excludes` (see <> for more details) - -`stored_fields`:: - Allows to control which stored fields to return as part of the - document explained. - -`routing`:: - Controls the routing in the case the routing was used - during indexing. - -`parent`:: - Same effect as setting the routing parameter. - -`preference`:: - Controls on which shard the explain is executed. - -`source`:: - Allows the data of the request to be put in the query - string of the url. - -`q`:: - The query string (maps to the query_string query). - -`df`:: - The default field to use when no field prefix is defined within - the query. - -`analyzer`:: - The analyzer name to be used when analyzing the query - string. Defaults to the default search analyzer. - -`analyze_wildcard`:: - Should wildcard and prefix queries be analyzed or - not. Defaults to false. - -`lenient`:: - If set to true will cause format based failures (like - providing text to a numeric field) to be ignored. Defaults to false. - -`default_operator`:: - The default operator to be used, can be AND or - OR. Defaults to OR. +The API returns the same result as the previous request. diff --git a/docs/reference/search/field-caps.asciidoc b/docs/reference/search/field-caps.asciidoc index c7d532a0b86..f94ac492d78 100644 --- a/docs/reference/search/field-caps.asciidoc +++ b/docs/reference/search/field-caps.asciidoc @@ -1,73 +1,108 @@ [[search-field-caps]] === Field Capabilities API -The field capabilities API allows to retrieve the capabilities of fields among multiple indices. +Allows you to retrieve the capabilities of fields among multiple indices. -The field capabilities API by default executes on all indices: - -[source,js] +[source,console] -------------------------------------------------- -GET _field_caps?fields=rating +GET /_field_caps?fields=rating -------------------------------------------------- -// CONSOLE -The request can also be restricted to specific indices: -[source,js] +[[search-field-caps-api-request]] +==== {api-request-title} + +`GET /_field_caps` + +`POST /_field_caps` + +`GET //_field_caps` + +`POST //_field_caps` + + +[[search-field-caps-api-desc]] +==== {api-description-title} + + +The field capabilities API returns the information about the capabilities of +fields among multiple indices. + + +[[search-field-caps-api-path-params]] +==== {api-path-parms-title} + +include::{docdir}/rest-api/common-parms.asciidoc[tag=index] + + +[[search-field-caps-api-query-params]] +==== {api-query-parms-title} + +include::{docdir}/rest-api/common-parms.asciidoc[tag=allow-no-indices] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=expand-wildcards] ++ +-- +Defaults to `open`. 
+-- + +include::{docdir}/rest-api/common-parms.asciidoc[tag=fields] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=index-ignore-unavailable] + +`include_unmapped`:: + (Optional, boolean) If `true`, unmapped fields are included in the response. + Defaults to `false`. + + +[[search-field-caps-api-response-body]] +==== {api-response-body-title} + + + +`searchable`:: + Whether this field is indexed for search on all indices. + +`aggregatable`:: + Whether this field can be aggregated on all indices. + +`indices`:: + The list of indices where this field has the same type, or null if all indices + have the same type for the field. + +`non_searchable_indices`:: + The list of indices where this field is not searchable, or null if all indices + have the same definition for the field. + +`non_aggregatable_indices`:: + The list of indices where this field is not aggregatable, or null if all + indices have the same definition for the field. + + +[[search-field-caps-api-example]] +==== {api-examples-title} + + +The request can be restricted to specific indices: + +[source,console] -------------------------------------------------- GET twitter/_field_caps?fields=rating -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] -Supported request options: -[horizontal] -`fields`:: A list of fields to compute stats for. The field name supports wildcard notation. For example, using `text_*` - will cause all fields that match the expression to be returned. +The next example API call requests information about the `rating` and the +`title` fields: -[float] -==== Field Capabilities - -The field capabilities API returns the following information per field: - -[horizontal] -`searchable`:: - -Whether this field is indexed for search on all indices. - -`aggregatable`:: - -Whether this field can be aggregated on all indices. - -`indices`:: - -The list of indices where this field has the same type, -or null if all indices have the same type for the field. - -`non_searchable_indices`:: - -The list of indices where this field is not searchable, -or null if all indices have the same definition for the field. - -`non_aggregatable_indices`:: - -The list of indices where this field is not aggregatable, -or null if all indices have the same definition for the field. - - -[float] -==== Response format - -Request: - -[source,js] +[source,console] -------------------------------------------------- GET _field_caps?fields=rating,title -------------------------------------------------- -// CONSOLE -[source,js] +The API returns the following response: + +[source,console-result] -------------------------------------------------- { "indices": ["index1", "index2", "index3", "index4", "index5"], @@ -96,7 +131,7 @@ GET _field_caps?fields=rating,title } } -------------------------------------------------- -// NOTCONSOLE +// TESTRESPONSE[skip:historically skipped] <1> The field `rating` is defined as a long in `index1` and `index2` and as a `keyword` in `index3` and `index4`. @@ -104,22 +139,19 @@ and as a `keyword` in `index3` and `index4`. <3> The field `rating` is not searchable in `index4`. <4> The field `title` is defined as `text` in all indices. -[float] -==== Unmapped fields By default unmapped fields are ignored. 
You can include them in the response by adding a parameter called `include_unmapped` in the request: -[source,js] +[source,console] -------------------------------------------------- GET _field_caps?fields=rating,title&include_unmapped -------------------------------------------------- -// CONSOLE -In which case the response will contain an entry for each field that is present in -some indices but not all: +In which case the response will contain an entry for each field that is present +in some indices but not all: -[source,js] +[source,console-result] -------------------------------------------------- { "indices": ["index1", "index2", "index3"], @@ -158,7 +190,7 @@ some indices but not all: } } -------------------------------------------------- -// NOTCONSOLE +// TESTRESPONSE[skip:historically skipped] <1> The `rating` field is unmapped` in `index5`. <2> The `title` field is unmapped` in `index5`. diff --git a/docs/reference/search/multi-search.asciidoc b/docs/reference/search/multi-search.asciidoc index 51e0f680a3b..265c3be00cd 100644 --- a/docs/reference/search/multi-search.asciidoc +++ b/docs/reference/search/multi-search.asciidoc @@ -123,7 +123,7 @@ The endpoint allows to also search against an index/indices in the URI itself, in which case it will be used as the default unless explicitly defined otherwise in the header. For example: -[source,js] +[source,console] -------------------------------------------------- GET twitter/_msearch {} @@ -133,7 +133,6 @@ GET twitter/_msearch {"index" : "twitter2"} {"query" : {"match_all" : {}}} -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] The above will execute the search against the `twitter` index for all the @@ -157,7 +156,7 @@ Much like described in <> for the _search resource, _msearch also provides support for templates. Submit them like follows for inline templates: -[source,js] +[source,console] ----------------------------------------------- GET _msearch/template {"index" : "twitter"} @@ -165,13 +164,12 @@ GET _msearch/template {"index" : "twitter"} { "source" : "{ \"query\": { \"match_{{template}}\": {} } }", "params": { "template": "all" } } ----------------------------------------------- -// CONSOLE // TEST[setup:twitter] You can also create search templates: -[source,js] +[source,console] ------------------------------------------ POST /_scripts/my_template_1 { @@ -187,11 +185,10 @@ POST /_scripts/my_template_1 } } ------------------------------------------ -// CONSOLE // TEST[setup:twitter] -[source,js] +[source,console] ------------------------------------------ POST /_scripts/my_template_2 { @@ -207,12 +204,11 @@ POST /_scripts/my_template_2 } } ------------------------------------------ -// CONSOLE // TEST[continued] You can use search templates in a _msearch: -[source,js] +[source,console] ----------------------------------------------- GET _msearch/template {"index" : "main"} @@ -220,7 +216,6 @@ GET _msearch/template {"index" : "main"} { "id": "my_template_2", "params": { "field": "user", "value": "test" } } ----------------------------------------------- -// CONSOLE // TEST[continued] diff --git a/docs/reference/search/profile.asciidoc b/docs/reference/search/profile.asciidoc index 2d5ceb67454..0d83d293052 100644 --- a/docs/reference/search/profile.asciidoc +++ b/docs/reference/search/profile.asciidoc @@ -18,7 +18,7 @@ many shards. 
Pretty-printing the response is recommended to help understand the Any `_search` request can be profiled by adding a top-level `profile` parameter: -[source,js] +[source,console] -------------------------------------------------- GET /twitter/_search { @@ -28,7 +28,6 @@ GET /twitter/_search } } -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] <1> Setting the top-level `profile` parameter to `true` will enable profiling @@ -376,11 +375,11 @@ The meaning of the stats are as follows: {empty} + Conjunctions (e.g. `must` clauses in a boolean) are typical consumers of `advance` -`matches`:: +`match`:: Some queries, such as phrase queries, match documents using a "two-phase" process. First, the document is "approximately" matched, and if it matches approximately, it is checked a second time with a more rigorous - (and expensive) process. The second phase verification is what the `matches` statistic measures. + (and expensive) process. The second phase verification is what the `match` statistic measures. {empty} + {empty} + For example, a phrase query first checks a document approximately by ensuring all terms in the phrase are @@ -389,7 +388,7 @@ The meaning of the stats are as follows: of the terms. {empty} + {empty} + - Because this two-phase process is only used by a handful of queries, the `metric` statistic will often be zero + Because this two-phase process is only used by a handful of queries, the `match` statistic is often zero `score`:: @@ -505,7 +504,7 @@ value is cumulative and contains the total time for all queries being rewritten. To demonstrate a slightly more complex query and the associated results, we can profile the following query: -[source,js] +[source,console] -------------------------------------------------- GET /twitter/_search { @@ -541,7 +540,6 @@ GET /twitter/_search } } -------------------------------------------------- -// CONSOLE // TEST[s/_search/_search\?filter_path=profile.shards.id,profile.shards.searches,profile.shards.aggregations/] // TEST[continued] @@ -704,7 +702,7 @@ The `aggregations` section contains detailed timing of the aggregation tree exec The overall structure of this aggregation tree will resemble your original Elasticsearch request. Let's execute the previous query again and look at the aggregation profile this time: -[source,js] +[source,console] -------------------------------------------------- GET /twitter/_search { @@ -740,7 +738,6 @@ GET /twitter/_search } } -------------------------------------------------- -// CONSOLE // TEST[s/_search/_search\?filter_path=profile.shards.aggregations/] // TEST[continued] diff --git a/docs/reference/search/rank-eval.asciidoc b/docs/reference/search/rank-eval.asciidoc index 4070e448931..9f5dd0bcb9e 100644 --- a/docs/reference/search/rank-eval.asciidoc +++ b/docs/reference/search/rank-eval.asciidoc @@ -147,7 +147,7 @@ Documents in the collection need to be rated either as relevant or irrelevant wi P@k does not take into account where in the top k results the relevant documents occur, so a ranking of ten results that contains one relevant result in position 10 is equally good as a ranking of ten results that contains one relevant result in position 1. -[source,js] +[source,console] -------------------------------- GET /twitter/_rank_eval { @@ -166,7 +166,6 @@ GET /twitter/_rank_eval } } -------------------------------- -// CONSOLE // TEST[setup:twitter] The `precision` metric takes the following optional parameters @@ -190,7 +189,7 @@ first relevant document. 
For example finding the first relevant result in position 3 means the reciprocal rank is 1/3. The reciprocal rank for each query is averaged across all queries in the test suite to give the https://en.wikipedia.org/wiki/Mean_reciprocal_rank[mean reciprocal rank]. -[source,js] +[source,console] -------------------------------- GET /twitter/_rank_eval { @@ -208,7 +207,6 @@ GET /twitter/_rank_eval } } -------------------------------- -// CONSOLE // TEST[setup:twitter] The `mean_reciprocal_rank` metric takes the following optional parameters @@ -229,7 +227,7 @@ In contrast to the two metrics above, https://en.wikipedia.org/wiki/Discounted_c The assumption is that highly relevant documents are more useful for the user when appearing at the top of the result list. Therefore, the DCG formula reduces the contribution that high ratings for documents on lower search ranks have on the overall DCG metric. -[source,js] +[source,console] -------------------------------- GET /twitter/_rank_eval { @@ -247,7 +245,6 @@ GET /twitter/_rank_eval } } -------------------------------- -// CONSOLE // TEST[setup:twitter] The `dcg` metric takes the following optional parameters: @@ -278,7 +275,7 @@ even more so if there are some relevant (but maybe less relevant) documents prec In this way, the ERR metric discounts documents which are shown after very relevant documents. This introduces a notion of dependency in the ordering of relevant documents that e.g. Precision or DCG don't account for. -[source,js] +[source,console] -------------------------------- GET /twitter/_rank_eval { @@ -296,7 +293,6 @@ GET /twitter/_rank_eval } } -------------------------------- -// CONSOLE // TEST[setup:twitter] The `expected_reciprocal_rank` metric takes the following parameters: diff --git a/docs/reference/search/request-body.asciidoc b/docs/reference/search/request-body.asciidoc index 9d34145232d..b5e1a4571bd 100644 --- a/docs/reference/search/request-body.asciidoc +++ b/docs/reference/search/request-body.asciidoc @@ -3,7 +3,7 @@ Specifies search criteria as request body parameters. -[source,js] +[source,console] -------------------------------------------------- GET /twitter/_search { @@ -12,7 +12,6 @@ GET /twitter/_search } } -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] @@ -89,7 +88,7 @@ all clients support GET with body, POST is allowed as well. [[search-request-body-api-example]] ==== {api-examples-title} -[source,js] +[source,console] -------------------------------------------------- GET /twitter/_search { @@ -98,7 +97,6 @@ GET /twitter/_search } } -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] @@ -155,11 +153,10 @@ interested in the search results. Also we can set `terminate_after` to `1` to indicate that the query execution can be terminated whenever the first matching document was found (per shard). -[source,js] +[source,console] -------------------------------------------------- GET /_search?q=message:number&size=0&terminate_after=1 -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] diff --git a/docs/reference/search/request/collapse.asciidoc b/docs/reference/search/request/collapse.asciidoc index 8cc48fbcc63..7d8466566fa 100644 --- a/docs/reference/search/request/collapse.asciidoc +++ b/docs/reference/search/request/collapse.asciidoc @@ -5,7 +5,7 @@ Allows to collapse search results based on field values. The collapsing is done by selecting only the top sorted document per collapse key. 
For instance the query below retrieves the best tweet for each user and sorts them by number of likes. -[source,js] +[source,console] -------------------------------------------------- GET /twitter/_search { @@ -21,8 +21,8 @@ GET /twitter/_search "from": 10 <3> } -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] + <1> collapse the result set using the "user" field <2> sort the top docs by number of likes <3> define the offset of the first collapsed result @@ -39,7 +39,7 @@ NOTE: The collapsing is applied to the top hits only and does not affect aggrega It is also possible to expand each collapsed top hits with the `inner_hits` option. -[source,js] +[source,console] -------------------------------------------------- GET /twitter/_search { @@ -60,8 +60,8 @@ GET /twitter/_search "sort": ["likes"] } -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] + <1> collapse the result set using the "user" field <2> the name used for the inner hit section in the response <3> the number of inner_hits to retrieve per collapse key @@ -73,7 +73,7 @@ See <> for the complete list of supp It is also possible to request multiple `inner_hits` for each collapsed hit. This can be useful when you want to get multiple representations of the collapsed hits. -[source,js] +[source,console] -------------------------------------------------- GET /twitter/_search { @@ -100,8 +100,8 @@ GET /twitter/_search "sort": ["likes"] } -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] + <1> collapse the result set using the "user" field <2> return the three most liked tweets for the user <3> return the three most recent tweets for the user diff --git a/docs/reference/search/request/docvalue-fields.asciidoc b/docs/reference/search/request/docvalue-fields.asciidoc index 6190eb6aef1..45e09dce78a 100644 --- a/docs/reference/search/request/docvalue-fields.asciidoc +++ b/docs/reference/search/request/docvalue-fields.asciidoc @@ -4,7 +4,7 @@ Allows to return the <> representation of a field for each hit, for example: -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -23,7 +23,7 @@ GET /_search ] } -------------------------------------------------- -// CONSOLE + <1> the name of the field <2> an object notation is supported as well <3> the object notation allows to specify a custom format @@ -32,7 +32,7 @@ Doc value fields can work on fields that have doc-values enabled, regardless of `*` can be used as a wild card, for example: -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -47,7 +47,7 @@ GET /_search ] } -------------------------------------------------- -// CONSOLE + <1> Match all fields ending with `field` <2> Format to be applied to all matching fields. diff --git a/docs/reference/search/request/explain.asciidoc b/docs/reference/search/request/explain.asciidoc index 704b958edd0..4b979034e38 100644 --- a/docs/reference/search/request/explain.asciidoc +++ b/docs/reference/search/request/explain.asciidoc @@ -3,7 +3,7 @@ Enables explanation for each hit on how its score was computed. 
-[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -13,4 +13,3 @@ GET /_search } } -------------------------------------------------- -// CONSOLE diff --git a/docs/reference/search/request/from-size.asciidoc b/docs/reference/search/request/from-size.asciidoc index 60ecd9ac705..bf156a774fb 100644 --- a/docs/reference/search/request/from-size.asciidoc +++ b/docs/reference/search/request/from-size.asciidoc @@ -10,7 +10,7 @@ Though `from` and `size` can be set as request parameters, they can also be set within the search body. `from` defaults to `0`, and `size` defaults to `10`. -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -20,7 +20,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE Note that `from` + `size` can not be more than the `index.max_result_window` diff --git a/docs/reference/search/request/highlighting.asciidoc b/docs/reference/search/request/highlighting.asciidoc index a800bebf7d4..73d9a1d93d7 100644 --- a/docs/reference/search/request/highlighting.asciidoc +++ b/docs/reference/search/request/highlighting.asciidoc @@ -20,7 +20,7 @@ For example, to get highlights for the `content` field in each search hit using the default highlighter, include a `highlight` object in the request body that specifies the `content` field: -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -34,7 +34,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] {es} supports three highlighters: `unified`, `plain`, and `fvh` (fast vector @@ -276,7 +275,7 @@ type:: The highlighter to use: `unified`, `plain`, or `fvh`. Defaults to You can specify highlighter settings globally and selectively override them for individual fields. -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -295,7 +294,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] [float] @@ -307,7 +305,7 @@ when highlighting. For example, the following query includes both the search query and rescore query in the `highlight_query`. Without the `highlight_query`, highlighting would only take the search query into account. -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -365,7 +363,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] [float] @@ -376,7 +373,7 @@ The `type` field allows to force a specific highlighter type. The allowed values are: `unified`, `plain` and `fvh`. The following is an example that forces the use of the plain highlighter: -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -390,7 +387,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] [[configure-tags]] @@ -401,7 +397,7 @@ By default, the highlighting will wrap highlighted text in `` and ``. This can be controlled by setting `pre_tags` and `post_tags`, for example: -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -417,13 +413,12 @@ GET /_search } } -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] When using the fast vector highlighter, you can specify additional tags and the "importance" is ordered. 
-[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -439,12 +434,11 @@ GET /_search } } -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] You can also use the built-in `styled` tag schema: -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -459,7 +453,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] [float] @@ -469,7 +462,7 @@ GET /_search Forces the highlighting to highlight fields based on the source even if fields are stored separately. Defaults to `false`. -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -483,7 +476,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] @@ -494,7 +486,7 @@ GET /_search By default, only fields that contains a query match are highlighted. Set `require_field_match` to `false` to highlight all fields. -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -509,7 +501,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] [[matched-fields]] @@ -528,7 +519,7 @@ the matches are combined is loaded so only that field would benefit from having In the following examples, `comment` is analyzed by the `english` analyzer and `comment.plain` is analyzed by the `standard` analyzer. -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -549,7 +540,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] The above matches both "run with scissors" and "running with scissors" @@ -558,7 +548,7 @@ phrases appear in a large document then "running with scissors" is sorted above "run with scissors" in the fragments list because there are more matches in that fragment. -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -579,14 +569,13 @@ GET /_search } } -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] The above highlights "run" as well as "running" and "scissors" but still sorts "running with scissors" above "run with scissors" because the plain match ("running") is boosted. -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -607,7 +596,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] The above query wouldn't highlight "run" or "scissor" but shows that @@ -656,7 +644,7 @@ Elasticsearch highlights the fields in the order that they are sent, but per the JSON spec, objects are unordered. If you need to be explicit about the order in which fields are highlighted specify the `fields` as an array: -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -668,7 +656,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] None of the highlighters built into Elasticsearch care about the order that the @@ -686,7 +673,7 @@ in characters (defaults to `100`), and the maximum number of fragments to return (defaults to `5`). 
For example: -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -700,13 +687,12 @@ GET /_search } } -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] On top of this it is possible to specify that highlighted fragments need to be sorted by score: -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -721,7 +707,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] If the `number_of_fragments` value is set to `0` then no fragments are @@ -730,7 +715,7 @@ course it is highlighted. This can be very handy if short texts (like document title or address) need to be highlighted but no fragmentation is required. Note that `fragment_size` is ignored in this case. -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -745,7 +730,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] When using `fvh` one can use `fragment_offset` @@ -757,7 +741,7 @@ beginning of the field by setting `no_match_size` (default `0`) to the length of the text that you want returned. The actual length may be shorter or longer than specified as it tries to break on a word boundary. -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -775,7 +759,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] [float] @@ -785,7 +768,7 @@ GET /_search Here is an example of setting the `comment` field in the index mapping to allow for highlighting using the postings: -[source,js] +[source,console] -------------------------------------------------- PUT /example { @@ -799,12 +782,11 @@ PUT /example } } -------------------------------------------------- -// CONSOLE Here is an example of setting the `comment` field to allow for highlighting using the `term_vectors` (this will cause the index to be bigger): -[source,js] +[source,console] -------------------------------------------------- PUT /example { @@ -818,7 +800,6 @@ PUT /example } } -------------------------------------------------- -// CONSOLE [float] [[specify-fragmenter]] @@ -827,7 +808,7 @@ PUT /example When using the `plain` highlighter, you can choose between the `simple` and `span` fragmenters: -[source,js] +[source,console] -------------------------------------------------- GET twitter/_search { @@ -846,7 +827,6 @@ GET twitter/_search } } -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] Response: @@ -886,7 +866,7 @@ Response: -------------------------------------------------- // TESTRESPONSE[s/\.\.\./"took": $body.took,"timed_out": false,"_shards": $body._shards,/] -[source,js] +[source,console] -------------------------------------------------- GET twitter/_search { @@ -905,7 +885,6 @@ GET twitter/_search } } -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] Response: diff --git a/docs/reference/search/request/index-boost.asciidoc b/docs/reference/search/request/index-boost.asciidoc index 9cf09fcd7b4..95b745f2628 100644 --- a/docs/reference/search/request/index-boost.asciidoc +++ b/docs/reference/search/request/index-boost.asciidoc @@ -7,7 +7,7 @@ one index matter more than hits coming from another index (think social graph where each user has an index). deprecated[5.2.0, This format is deprecated. 
Please use array format instead.] -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -17,12 +17,11 @@ GET /_search } } -------------------------------------------------- -// CONSOLE // TEST[setup:index_boost warning:Object format in indices_boost is deprecated, please use array format instead] You can also specify it as an array to control the order of boosts. -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -32,7 +31,6 @@ GET /_search ] } -------------------------------------------------- -// CONSOLE // TEST[continued] This is important when you use aliases or wildcard expression. diff --git a/docs/reference/search/request/inner-hits.asciidoc b/docs/reference/search/request/inner-hits.asciidoc index 8e3641f9525..51c2c158903 100644 --- a/docs/reference/search/request/inner-hits.asciidoc +++ b/docs/reference/search/request/inner-hits.asciidoc @@ -83,7 +83,7 @@ Inner hits also supports the following per document features: The nested `inner_hits` can be used to include nested inner objects as inner hits to a search hit. -[source,js] +[source,console] -------------------------------------------------- PUT test { @@ -124,7 +124,6 @@ POST test/_search } } -------------------------------------------------- -// CONSOLE <1> The inner hit definition in the nested query. No other options need to be defined. @@ -207,7 +206,7 @@ has an impact on the time it takes to execute the entire search request, especia are set higher than the default. To avoid the relatively expensive source extraction for nested inner hits, one can disable including the source and solely rely on doc values fields. Like this: -[source,js] +[source,console] -------------------------------------------------- PUT test { @@ -253,7 +252,6 @@ POST test/_search } } -------------------------------------------------- -// CONSOLE //// @@ -321,7 +319,7 @@ If a mapping has multiple levels of hierarchical nested object fields each level For example if there is a `comments` nested field that contains a `votes` nested field and votes should directly be returned with the root hits then the following path can be defined: -[source,js] +[source,console] -------------------------------------------------- PUT test { @@ -374,7 +372,6 @@ POST test/_search } } -------------------------------------------------- -// CONSOLE Which would look like: @@ -441,7 +438,7 @@ This indirect referencing is only supported for nested inner hits. The parent/child `inner_hits` can be used to include parent or child: -[source,js] +[source,console] -------------------------------------------------- PUT test { @@ -487,7 +484,6 @@ POST test/_search } } -------------------------------------------------- -// CONSOLE <1> The inner hit definition like in the nested example. diff --git a/docs/reference/search/request/min-score.asciidoc b/docs/reference/search/request/min-score.asciidoc index 1a03d6d3ee4..503e114d4db 100644 --- a/docs/reference/search/request/min-score.asciidoc +++ b/docs/reference/search/request/min-score.asciidoc @@ -4,7 +4,7 @@ Exclude documents which have a `_score` less than the minimum specified in `min_score`: -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -14,7 +14,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE Note, most times, this does not make much sense, but is provided for advanced use cases. 
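When it is used, `min_score` is normally combined with a scoring query rather than a `match_all`; a minimal sketch, assuming a `message` text field (the field, query text, and cutoff value are only illustrative):

[source,console]
--------------------------------------------------
GET /_search
{
  "min_score": 0.5,                             <1>
  "query": {
    "match": { "message": "elasticsearch" }     <2>
  }
}
--------------------------------------------------
<1> Hits whose `_score` is below `0.5` are dropped from the response.
<2> The `message` field and the query text are placeholders for this sketch.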
diff --git a/docs/reference/search/request/named-queries-and-filters.asciidoc b/docs/reference/search/request/named-queries-and-filters.asciidoc index c850210f030..97b6969ea19 100644 --- a/docs/reference/search/request/named-queries-and-filters.asciidoc +++ b/docs/reference/search/request/named-queries-and-filters.asciidoc @@ -3,7 +3,7 @@ Each filter and query can accept a `_name` in its top level definition. -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -23,7 +23,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE The search response will include for each hit the `matched_queries` it matched on. The tagging of queries and filters only make sense for the `bool` query. diff --git a/docs/reference/search/request/post-filter.asciidoc b/docs/reference/search/request/post-filter.asciidoc index 478b4082e04..db143869c26 100644 --- a/docs/reference/search/request/post-filter.asciidoc +++ b/docs/reference/search/request/post-filter.asciidoc @@ -7,7 +7,7 @@ best explained by example: Imagine that you are selling shirts that have the following properties: -[source,js] +[source,console] -------------------------------------------------- PUT /shirts { @@ -27,7 +27,6 @@ PUT /shirts/_doc/1?refresh "model": "slim" } -------------------------------------------------- -// CONSOLE // TESTSETUP @@ -37,7 +36,7 @@ Imagine a user has specified two filters: Gucci in the search results. Normally you would do this with a <>: -[source,js] +[source,console] -------------------------------------------------- GET /shirts/_search { @@ -51,7 +50,6 @@ GET /shirts/_search } } -------------------------------------------------- -// CONSOLE However, you would also like to use _faceted navigation_ to display a list of other options that the user could click on. Perhaps you have a `model` field @@ -61,7 +59,7 @@ that would allow the user to limit their search results to red Gucci This can be done with a <>: -[source,js] +[source,console] -------------------------------------------------- GET /shirts/_search { @@ -80,7 +78,7 @@ GET /shirts/_search } } -------------------------------------------------- -// CONSOLE + <1> Returns the most popular models of red shirts by Gucci. But perhaps you would also like to tell the user how many Gucci shirts are @@ -92,7 +90,7 @@ Instead, you want to include shirts of all colors during aggregation, then apply the `colors` filter only to the search results. This is the purpose of the `post_filter`: -[source,js] +[source,console] -------------------------------------------------- GET /shirts/_search { @@ -123,7 +121,7 @@ GET /shirts/_search } } -------------------------------------------------- -// CONSOLE + <1> The main query now finds all shirts by Gucci, regardless of color. <2> The `colors` agg returns popular colors for shirts by Gucci. 
<3> The `color_red` agg limits the `models` sub-aggregation diff --git a/docs/reference/search/request/preference.asciidoc b/docs/reference/search/request/preference.asciidoc index 12bcca51c17..8462748de4c 100644 --- a/docs/reference/search/request/preference.asciidoc +++ b/docs/reference/search/request/preference.asciidoc @@ -55,7 +55,7 @@ Custom (string) value:: For instance, use the user's session ID `xyzabc123` as follows: -[source,js] +[source,console] ------------------------------------------------ GET /_search?preference=xyzabc123 { @@ -66,7 +66,6 @@ GET /_search?preference=xyzabc123 } } ------------------------------------------------ -// CONSOLE This can be an effective strategy to increase usage of e.g. the request cache for unique users running similar searches repeatedly by always hitting the same cache, while diff --git a/docs/reference/search/request/query.asciidoc b/docs/reference/search/request/query.asciidoc index d114bf93791..a3449aa4453 100644 --- a/docs/reference/search/request/query.asciidoc +++ b/docs/reference/search/request/query.asciidoc @@ -4,7 +4,7 @@ The query element within the search request body allows to define a query using the <>. -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -13,4 +13,3 @@ GET /_search } } -------------------------------------------------- -// CONSOLE diff --git a/docs/reference/search/request/rescore.asciidoc b/docs/reference/search/request/rescore.asciidoc index 64c3896ee3d..547bca1f64f 100644 --- a/docs/reference/search/request/rescore.asciidoc +++ b/docs/reference/search/request/rescore.asciidoc @@ -39,7 +39,7 @@ respectively. Both default to `1`. For example: -[source,js] +[source,console] -------------------------------------------------- POST /_search { @@ -68,7 +68,6 @@ POST /_search } } -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] The way the scores are combined can be controlled with the `score_mode`: @@ -87,7 +86,7 @@ for <> rescores. It is also possible to execute multiple rescores in sequence: -[source,js] +[source,console] -------------------------------------------------- POST /_search { @@ -130,7 +129,6 @@ POST /_search } ] } -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] The first one gets the results of the query then the second one gets the diff --git a/docs/reference/search/request/script-fields.asciidoc b/docs/reference/search/request/script-fields.asciidoc index 090e403c578..0078400b003 100644 --- a/docs/reference/search/request/script-fields.asciidoc +++ b/docs/reference/search/request/script-fields.asciidoc @@ -4,7 +4,7 @@ Allows to return a <> (based on different fields) for each hit, for example: -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -30,7 +30,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE // TEST[setup:sales] Script fields can work on fields that are not stored (`price` in @@ -41,7 +40,7 @@ Script fields can also access the actual `_source` document and extract specific elements to be returned from it by using `params['_source']`. Here is an example: -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -55,7 +54,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] Note the `_source` keyword here to navigate the json-like model. 
diff --git a/docs/reference/search/request/scroll.asciidoc b/docs/reference/search/request/scroll.asciidoc index bb5ad288864..3d613e4172b 100644 --- a/docs/reference/search/request/scroll.asciidoc +++ b/docs/reference/search/request/scroll.asciidoc @@ -36,7 +36,7 @@ In order to use scrolling, the initial search request should specify the `scroll` parameter in the query string, which tells Elasticsearch how long it should keep the ``search context'' alive (see <>), eg `?scroll=1m`. -[source,js] +[source,console] -------------------------------------------------- POST /twitter/_search?scroll=1m { @@ -48,14 +48,13 @@ POST /twitter/_search?scroll=1m } } -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] The result from the above request includes a `_scroll_id`, which should be passed to the `scroll` API in order to retrieve the next batch of results. -[source,js] +[source,console] -------------------------------------------------- POST /_search/scroll <1> { @@ -63,7 +62,6 @@ POST /_search/scroll <1> "scroll_id" : "DXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAD4WYm9laVYtZndUQlNsdDcwakFMNjU1QQ==" <3> } -------------------------------------------------- -// CONSOLE // TEST[continued s/DXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAD4WYm9laVYtZndUQlNsdDcwakFMNjU1QQ==/$body._scroll_id/] <1> `GET` or `POST` can be used and the URL should not include the `index` @@ -88,7 +86,7 @@ NOTE: Scroll requests have optimizations that make them faster when the sort order is `_doc`. If you want to iterate over all documents regardless of the order, this is the most efficient option: -[source,js] +[source,console] -------------------------------------------------- GET /_search?scroll=1m { @@ -97,7 +95,6 @@ GET /_search?scroll=1m ] } -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] [[scroll-search-context]] @@ -142,11 +139,10 @@ maximum number of open scrolls is 500. This limit can be updated with the You can check how many search contexts are open with the <>: -[source,js] +[source,console] --------------------------------------- GET /_nodes/stats/indices/search --------------------------------------- -// CONSOLE ===== Clear scroll API @@ -156,19 +152,18 @@ exceeded. However keeping scrolls open has a cost, as discussed in the cleared as soon as the scroll is not being used anymore using the `clear-scroll` API: -[source,js] +[source,console] --------------------------------------- DELETE /_search/scroll { "scroll_id" : "DXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAD4WYm9laVYtZndUQlNsdDcwakFMNjU1QQ==" } --------------------------------------- -// CONSOLE // TEST[catch:missing] Multiple scroll IDs can be passed as array: -[source,js] +[source,console] --------------------------------------- DELETE /_search/scroll { @@ -178,25 +173,22 @@ DELETE /_search/scroll ] } --------------------------------------- -// CONSOLE // TEST[catch:missing] All search contexts can be cleared with the `_all` parameter: -[source,js] +[source,console] --------------------------------------- DELETE /_search/scroll/_all --------------------------------------- -// CONSOLE The `scroll_id` can also be passed as a query string parameter or in the request body. 
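The query string variant is the shortest form; the scroll ID below is the same placeholder value used in the surrounding examples:

[source,console]
---------------------------------------
DELETE /_search/scroll?scroll_id=DXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAD4WYm9laVYtZndUQlNsdDcwakFMNjU1QQ== <1>
---------------------------------------
<1> Equivalent to sending the same `scroll_id` in the request body.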
Multiple scroll IDs can be passed as comma separated values: -[source,js] +[source,console] --------------------------------------- DELETE /_search/scroll/DXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAD4WYm9laVYtZndUQlNsdDcwakFMNjU1QQ==,DnF1ZXJ5VGhlbkZldGNoBQAAAAAAAAABFmtSWWRRWUJrU2o2ZExpSGJCVmQxYUEAAAAAAAAAAxZrUllkUVlCa1NqNmRMaUhiQlZkMWFBAAAAAAAAAAIWa1JZZFFZQmtTajZkTGlIYkJWZDFhQQAAAAAAAAAFFmtSWWRRWUJrU2o2ZExpSGJCVmQxYUEAAAAAAAAABBZrUllkUVlCa1NqNmRMaUhiQlZkMWFB --------------------------------------- -// CONSOLE // TEST[catch:missing] [[sliced-scroll]] @@ -205,7 +197,7 @@ DELETE /_search/scroll/DXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAD4WYm9laVYtZndUQlNsdDcwakFMN For scroll queries that return a lot of documents it is possible to split the scroll in multiple slices which can be consumed independently: -[source,js] +[source,console] -------------------------------------------------- GET /twitter/_search?scroll=1m { @@ -232,7 +224,6 @@ GET /twitter/_search?scroll=1m } } -------------------------------------------------- -// CONSOLE // TEST[setup:big_twitter] <1> The id of the slice @@ -268,7 +259,7 @@ slice gets deterministic results. * The cardinality of the field should be high. This ensures that each slice gets approximately the same amount of documents. -[source,js] +[source,console] -------------------------------------------------- GET /twitter/_search?scroll=1m { @@ -284,7 +275,6 @@ GET /twitter/_search?scroll=1m } } -------------------------------------------------- -// CONSOLE // TEST[setup:big_twitter] For append only time-based indices, the `timestamp` field can be used safely. diff --git a/docs/reference/search/request/search-after.asciidoc b/docs/reference/search/request/search-after.asciidoc index e940b0688cc..76313dee518 100644 --- a/docs/reference/search/request/search-after.asciidoc +++ b/docs/reference/search/request/search-after.asciidoc @@ -9,7 +9,8 @@ The `search_after` parameter circumvents this problem by providing a live cursor The idea is to use the results from the previous page to help the retrieval of the next page. Suppose that the query to retrieve the first page looks like this: -[source,js] + +[source,console] -------------------------------------------------- GET twitter/_search { @@ -25,7 +26,6 @@ GET twitter/_search ] } -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] // TEST[s/"tie_breaker_id": "asc"/"tie_breaker_id": {"unmapped_type": "keyword"}/] @@ -53,7 +53,7 @@ These `sort values` can be used in conjunction with the `search_after` parameter document in the result list. For instance we can use the `sort values` of the last document and pass it to `search_after` to retrieve the next page of results: -[source,js] +[source,console] -------------------------------------------------- GET twitter/_search { @@ -70,7 +70,6 @@ GET twitter/_search ] } -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] // TEST[s/"tie_breaker_id": "asc"/"tie_breaker_id": {"unmapped_type": "keyword"}/] diff --git a/docs/reference/search/request/search-type.asciidoc b/docs/reference/search/request/search-type.asciidoc index 0695b604338..ccb34b78875 100644 --- a/docs/reference/search/request/search-type.asciidoc +++ b/docs/reference/search/request/search-type.asciidoc @@ -50,11 +50,10 @@ During the second phase, the coordinating node requests the document content (and highlighted snippets, if any) from *only the relevant shards*. 
-[source,js] +[source,console] -------------------------------------------------- GET twitter/_search?search_type=query_then_fetch -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] NOTE: This is the default setting, if you do not specify a `search_type` @@ -69,9 +68,8 @@ Same as "Query Then Fetch", except for an initial scatter phase which goes and computes the distributed term frequencies for more accurate scoring. -[source,js] +[source,console] -------------------------------------------------- GET twitter/_search?search_type=dfs_query_then_fetch -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] diff --git a/docs/reference/search/request/seq-no.asciidoc b/docs/reference/search/request/seq-no.asciidoc index 5bfc328a309..b8d29a685c0 100644 --- a/docs/reference/search/request/seq-no.asciidoc +++ b/docs/reference/search/request/seq-no.asciidoc @@ -4,7 +4,7 @@ Returns the sequence number and primary term of the last modification to each search hit. See <> for more details. -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -14,4 +14,3 @@ GET /_search } } -------------------------------------------------- -// CONSOLE diff --git a/docs/reference/search/request/sort.asciidoc b/docs/reference/search/request/sort.asciidoc index fdec892f2cc..cc3b7298957 100644 --- a/docs/reference/search/request/sort.asciidoc +++ b/docs/reference/search/request/sort.asciidoc @@ -7,7 +7,7 @@ field name for `_score` to sort by score, and `_doc` to sort by index order. Assuming the following index mapping: -[source,js] +[source,console] -------------------------------------------------- PUT /my_index { @@ -25,9 +25,8 @@ PUT /my_index } } -------------------------------------------------- -// CONSOLE -[source,js] +[source,console] -------------------------------------------------- GET /my_index/_search { @@ -43,7 +42,6 @@ GET /my_index/_search } } -------------------------------------------------- -// CONSOLE // TEST[continued] NOTE: `_doc` has no real use-case besides being the most efficient sort order. @@ -92,7 +90,7 @@ In the example below the field price has multiple prices per document. In this case the result hits will be sorted by price ascending based on the average price per document. -[source,js] +[source,console] -------------------------------------------------- PUT /my_index/_doc/1?refresh { @@ -110,7 +108,6 @@ POST /_search ] } -------------------------------------------------- -// CONSOLE ===== Sorting numeric fields @@ -122,7 +119,7 @@ indices. Consider for instance these two indices: -[source,js] +[source,console] -------------------------------------------------- PUT /index_double { @@ -133,9 +130,8 @@ PUT /index_double } } -------------------------------------------------- -// CONSOLE -[source,js] +[source,console] -------------------------------------------------- PUT /index_long { @@ -146,7 +142,6 @@ PUT /index_long } } -------------------------------------------------- -// CONSOLE // TEST[continued] Since `field` is mapped as a `double` in the first index and as a `long` @@ -155,7 +150,7 @@ that query both indices by default. 
However you can force the type to one or the other with the `numeric_type` option in order to force a specific type for all indices: -[source,js] +[source,console] -------------------------------------------------- POST /index_long,index_double/_search { @@ -168,7 +163,6 @@ POST /index_long,index_double/_search ] } -------------------------------------------------- -// CONSOLE // TEST[continued] In the example above, values for the `index_long` index are casted to @@ -183,7 +177,7 @@ This option can also be used to convert a `date` field that uses millisecond resolution to a `date_nanos` field with nanosecond resolution. Consider for instance these two indices: -[source,js] +[source,console] -------------------------------------------------- PUT /index_double { @@ -194,9 +188,8 @@ PUT /index_double } } -------------------------------------------------- -// CONSOLE -[source,js] +[source,console] -------------------------------------------------- PUT /index_long { @@ -207,7 +200,6 @@ PUT /index_long } } -------------------------------------------------- -// CONSOLE // TEST[continued] Values in these indices are stored with different resolutions so sorting on these @@ -216,7 +208,7 @@ With the `numeric_type` type option it is possible to set a single resolution fo the sort, setting to `date` will convert the `date_nanos` to the millisecond resolution while `date_nanos` will convert the values in the `date` field to the nanoseconds resolution: -[source,js] +[source,console] -------------------------------------------------- POST /index_long,index_double/_search { @@ -229,7 +221,6 @@ POST /index_long,index_double/_search ] } -------------------------------------------------- -// CONSOLE // TEST[continued] [WARNING] @@ -274,7 +265,7 @@ favor of the options documented above. In the below example `offer` is a field of type `nested`. The nested `path` needs to be specified; otherwise, Elasticsearch doesn't know on what nested level sort values need to be captured. -[source,js] +[source,console] -------------------------------------------------- POST /_search { @@ -297,12 +288,11 @@ POST /_search ] } -------------------------------------------------- -// CONSOLE In the below example `parent` and `child` fields are of type `nested`. The `nested_path` needs to be specified at each level; otherwise, Elasticsearch doesn't know on what nested level sort values need to be captured. -[source,js] +[source,console] -------------------------------------------------- POST /_search { @@ -344,7 +334,6 @@ POST /_search ] } -------------------------------------------------- -// CONSOLE Nested sorting is also supported when sorting by scripts and sorting by geo distance. @@ -359,7 +348,7 @@ The default is `_last`. For example: -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -371,7 +360,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE NOTE: If a nested inner object doesn't match with the `nested_filter` then a missing value is used. @@ -384,7 +372,7 @@ fields that have no mapping and not sort by them. The value of this parameter is used to determine what sort values to emit. 
Here is an example of how it can be used: -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -396,7 +384,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE If any of the indices that are queried doesn't have a mapping for `price` then Elasticsearch will handle it as if there was a mapping of type @@ -407,7 +394,7 @@ then Elasticsearch will handle it as if there was a mapping of type Allow to sort by `_geo_distance`. Here is an example, assuming `pin.location` is a field of type `geo_point`: -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -428,7 +415,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE @@ -461,7 +447,7 @@ The following formats are supported in providing the coordinates: ====== Lat Lon as Properties -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -482,13 +468,12 @@ GET /_search } } -------------------------------------------------- -// CONSOLE ====== Lat Lon as String Format in `lat,lon`. -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -506,11 +491,10 @@ GET /_search } } -------------------------------------------------- -// CONSOLE ====== Geohash -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -528,14 +512,13 @@ GET /_search } } -------------------------------------------------- -// CONSOLE ====== Lat Lon as Array Format in `[lon, lat]`, note, the order of lon/lat here in order to conform with http://geojson.org/[GeoJSON]. -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -553,14 +536,13 @@ GET /_search } } -------------------------------------------------- -// CONSOLE ===== Multiple reference points Multiple geo points can be passed as an array containing any `geo_point` format, for example -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -578,7 +560,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE and so forth. @@ -590,7 +571,7 @@ The final distance for a document will then be `min`/`max`/`avg` (defined via `m Allow to sort based on custom scripts, here is an example: -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -612,7 +593,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE ===== Track Scores @@ -620,7 +600,7 @@ GET /_search When sorting on a field, scores are not computed. By setting `track_scores` to true, scores will still be computed and tracked. 
-[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -635,7 +615,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE ===== Memory Considerations diff --git a/docs/reference/search/request/source-filtering.asciidoc b/docs/reference/search/request/source-filtering.asciidoc index 467c31356b7..fc838bdd908 100644 --- a/docs/reference/search/request/source-filtering.asciidoc +++ b/docs/reference/search/request/source-filtering.asciidoc @@ -11,7 +11,7 @@ You can turn off `_source` retrieval by using the `_source` parameter: To disable `_source` retrieval set to `false`: -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -21,13 +21,12 @@ GET /_search } } -------------------------------------------------- -// CONSOLE The `_source` also accepts one or more wildcard patterns to control what parts of the `_source` should be returned: For example: -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -37,11 +36,10 @@ GET /_search } } -------------------------------------------------- -// CONSOLE Or -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -51,7 +49,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE Finally, for complete control, you can specify both `includes` and `excludes` patterns. If `includes` is not empty, then only fields that match one of the @@ -59,7 +56,7 @@ patterns in `includes` but none of the patterns in `excludes` are provided in `_source`. If `includes` is empty, then all fields are provided in `_source`, except for those that match a pattern in `excludes`. -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -72,4 +69,3 @@ GET /_search } } -------------------------------------------------- -// CONSOLE diff --git a/docs/reference/search/request/stored-fields.asciidoc b/docs/reference/search/request/stored-fields.asciidoc index 550c3b81af4..c1f0feaed96 100644 --- a/docs/reference/search/request/stored-fields.asciidoc +++ b/docs/reference/search/request/stored-fields.asciidoc @@ -9,7 +9,7 @@ subsets of the original source document to be returned. Allows to selectively load specific stored fields for each document represented by a search hit. -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -19,14 +19,13 @@ GET /_search } } -------------------------------------------------- -// CONSOLE `*` can be used to load all stored fields from the document. An empty array will cause only the `_id` and `_type` for each hit to be returned, for example: -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -36,7 +35,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE If the requested fields are not stored (`store` mapping set to `false`), they will be ignored. @@ -58,7 +56,7 @@ must be used within an <> block. To disable the stored fields (and metadata fields) entirely use: `_none_`: -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -68,7 +66,6 @@ GET /_search } } -------------------------------------------------- -// CONSOLE NOTE: <> and <> parameters cannot be activated if `_none_` is used. 
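As a fuller sketch of the basic case, the mapping has to mark fields as stored before `stored_fields` can return them; the index and field names here are assumptions:

[source,console]
--------------------------------------------------
PUT /my-index
{
  "mappings": {
    "properties": {
      "title":   { "type": "text", "store": true },  <1>
      "counter": { "type": "integer" }
    }
  }
}

GET /my-index/_search
{
  "stored_fields": [ "title", "counter" ],            <2>
  "query": { "match_all": {} }
}
--------------------------------------------------
<1> Only `title` is stored explicitly.
<2> `counter` is ignored in the response because it is not a stored field.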
diff --git a/docs/reference/search/request/track-total-hits.asciidoc b/docs/reference/search/request/track-total-hits.asciidoc index 9635d8f4a46..efd6156e36d 100644 --- a/docs/reference/search/request/track-total-hits.asciidoc +++ b/docs/reference/search/request/track-total-hits.asciidoc @@ -19,7 +19,7 @@ should be interpreted. A value of `"gte"` means that the `"total.value"` is a lower bound of the total hits that match the query and a value of `"eq"` indicates that `"total.value"` is the accurate count. -[source,js] +[source,console] -------------------------------------------------- GET twitter/_search { @@ -32,7 +32,6 @@ GET twitter/_search } -------------------------------------------------- // TEST[setup:twitter] -// CONSOLE \... returns: @@ -65,7 +64,7 @@ It is also possible to set `track_total_hits` to an integer. For instance the following query will accurately track the total hit count that match the query up to 100 documents: -[source,js] +[source,console] -------------------------------------------------- GET twitter/_search { @@ -77,7 +76,6 @@ GET twitter/_search } } -------------------------------------------------- -// CONSOLE // TEST[continued] The `hits.total.relation` in the response will indicate if the @@ -140,7 +138,7 @@ will indicate that the returned value is a lower bound: If you don't need to track the total number of hits at all you can improve query times by setting this option to `false`: -[source,js] +[source,console] -------------------------------------------------- GET twitter/_search { @@ -152,7 +150,6 @@ GET twitter/_search } } -------------------------------------------------- -// CONSOLE // TEST[continued] \... returns: diff --git a/docs/reference/search/request/version.asciidoc b/docs/reference/search/request/version.asciidoc index 527481348e3..d2149dddce2 100644 --- a/docs/reference/search/request/version.asciidoc +++ b/docs/reference/search/request/version.asciidoc @@ -3,7 +3,7 @@ Returns a version for each search hit. -[source,js] +[source,console] -------------------------------------------------- GET /_search { @@ -13,4 +13,3 @@ GET /_search } } -------------------------------------------------- -// CONSOLE diff --git a/docs/reference/search/search-shards.asciidoc b/docs/reference/search/search-shards.asciidoc index 5d900105525..6909ff4059b 100644 --- a/docs/reference/search/search-shards.asciidoc +++ b/docs/reference/search/search-shards.asciidoc @@ -3,11 +3,10 @@ Returns the indices and shards that a search request would be executed against. 
-[source,js] +[source,console] -------------------------------------------------- GET /twitter/_search_shards -------------------------------------------------- -// CONSOLE // TEST[s/^/PUT twitter\n{"settings":{"index.number_of_shards":5}}\n/] @@ -55,11 +54,10 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=routing] [[search-shards-api-example]] ==== {api-examples-title} -[source,js] +[source,console] -------------------------------------------------- GET /twitter/_search_shards -------------------------------------------------- -// CONSOLE // TEST[s/^/PUT twitter\n{"settings":{"index.number_of_shards":5}}\n/] The API returns the following result: @@ -140,11 +138,10 @@ The API returns the following result: Specifying the same request, this time with a routing value: -[source,js] +[source,console] -------------------------------------------------- GET /twitter/_search_shards?routing=foo,bar -------------------------------------------------- -// CONSOLE // TEST[s/^/PUT twitter\n{"settings":{"index.number_of_shards":5}}\n/] The API returns the following result: diff --git a/docs/reference/search/search-template.asciidoc b/docs/reference/search/search-template.asciidoc index 5a8b7f4e251..67aa9001c21 100644 --- a/docs/reference/search/search-template.asciidoc +++ b/docs/reference/search/search-template.asciidoc @@ -3,7 +3,7 @@ Allows you to use the mustache language to pre render search requests. -[source,js] +[source,console] ------------------------------------------ GET _search/template { @@ -18,7 +18,6 @@ GET _search/template } } ------------------------------------------ -// CONSOLE // TEST[setup:twitter] [[search-template-api-request]] @@ -107,7 +106,7 @@ The API request body must contain the search definition template and its paramet You can store a search template using the stored scripts API. -[source,js] +[source,console] ------------------------------------------ POST _scripts/ { @@ -123,7 +122,6 @@ POST _scripts/ } } ------------------------------------------ -// CONSOLE // TEST[continued] ////////////////////////// @@ -143,11 +141,10 @@ created: The template can be retrieved by calling -[source,js] +[source,console] ------------------------------------------ GET _scripts/ ------------------------------------------ -// CONSOLE // TEST[continued] The API returns the following result: @@ -170,11 +167,10 @@ The API returns the following result: This template can be deleted by calling -[source,js] +[source,console] ------------------------------------------ DELETE _scripts/ ------------------------------------------ -// CONSOLE // TEST[continued] @@ -183,7 +179,7 @@ DELETE _scripts/ To use a stored template at search time send the following request: -[source,js] +[source,console] ------------------------------------------ GET _search/template { @@ -193,7 +189,6 @@ GET _search/template } } ------------------------------------------ -// CONSOLE // TEST[catch:missing] <1> Name of the stored template script. 
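End to end, using a hypothetical template id `my_template` and a single `query_string` parameter, the store-and-search round trip might look like this:

[source,console]
------------------------------------------
POST _scripts/my_template                          <1>
{
  "script": {
    "lang": "mustache",
    "source": {
      "query": {
        "match": { "message": "{{query_string}}" }
      }
    }
  }
}

GET _search/template
{
  "id": "my_template",                             <2>
  "params": {
    "query_string": "search for these words"
  }
}
------------------------------------------
<1> `my_template` is a made-up id used only for this sketch.
<2> The stored template is looked up by the same id and `{{query_string}}` is filled in from `params`.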
@@ -204,7 +199,7 @@ GET _search/template A template can be rendered in a response with given parameters by using the following request: -[source,js] +[source,console] ------------------------------------------ GET _render/template { @@ -216,7 +211,6 @@ GET _render/template } } ------------------------------------------ -// CONSOLE The API returns the rendered template: @@ -258,7 +252,7 @@ GET _render/template/ You can use the `explain` parameter when running a template: -[source,js] +[source,console] ------------------------------------------ GET _search/template { @@ -269,7 +263,6 @@ GET _search/template "explain": true } ------------------------------------------ -// CONSOLE // TEST[catch:missing] @@ -278,7 +271,7 @@ GET _search/template You can use the `profile` parameter when running a template: -[source,js] +[source,console] ------------------------------------------ GET _search/template { @@ -289,14 +282,13 @@ GET _search/template "profile": true } ------------------------------------------ -// CONSOLE // TEST[catch:missing] [[search-template-query-string-single]] ===== Filling in a query string with a single value -[source,js] +[source,console] ------------------------------------------ GET _search/template { @@ -312,7 +304,6 @@ GET _search/template } } ------------------------------------------ -// CONSOLE // TEST[setup:twitter] [[search-template-converting-to-json]] @@ -321,7 +312,7 @@ GET _search/template The `{{#toJson}}parameter{{/toJson}}` function can be used to convert parameters like maps and array to their JSON representation: -[source,js] +[source,console] ------------------------------------------ GET _search/template { @@ -333,7 +324,6 @@ GET _search/template } } ------------------------------------------ -// CONSOLE which is rendered as: @@ -354,7 +344,7 @@ which is rendered as: A more complex example substitutes an array of JSON objects: -[source,js] +[source,console] ------------------------------------------ GET _search/template { @@ -367,7 +357,6 @@ GET _search/template } } ------------------------------------------ -// CONSOLE which is rendered as: @@ -400,7 +389,7 @@ which is rendered as: The `{{#join}}array{{/join}}` function can be used to concatenate the values of an array as a comma delimited string: -[source,js] +[source,console] ------------------------------------------ GET _search/template { @@ -416,7 +405,6 @@ GET _search/template } } ------------------------------------------ -// CONSOLE which is rendered as: @@ -434,7 +422,7 @@ which is rendered as: The function also accepts a custom delimiter: -[source,js] +[source,console] ------------------------------------------ GET _search/template { @@ -458,7 +446,6 @@ GET _search/template } } ------------------------------------------ -// CONSOLE which is rendered as: @@ -624,7 +611,7 @@ http://www.w3.org/TR/html4/[HTML specification]. As an example, it is useful to encode a URL: -[source,js] +[source,console] ------------------------------------------ GET _render/template { @@ -641,7 +628,6 @@ GET _render/template } } ------------------------------------------ -// CONSOLE The previous query will be rendered as: diff --git a/docs/reference/search/search.asciidoc b/docs/reference/search/search.asciidoc index 9e396485e2f..564254026f7 100644 --- a/docs/reference/search/search.asciidoc +++ b/docs/reference/search/search.asciidoc @@ -3,11 +3,10 @@ Returns search hits that match the query defined in the request. 
-[source,js] +[source,console] -------------------------------------------------- GET /twitter/_search?q=user:kimchy -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] @@ -225,28 +224,25 @@ All search APIs can be applied across multiple indices with support for the <>. For example, we can search on all documents within the twitter index: -[source,js] +[source,console] -------------------------------------------------- GET /twitter/_search?q=user:kimchy -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] We can also search all documents with a certain tag across several indices (for example, when there is one index per user): -[source,js] +[source,console] -------------------------------------------------- GET /kimchy,elasticsearch/_search?q=tag:wow -------------------------------------------------- -// CONSOLE // TEST[s/^/PUT kimchy\nPUT elasticsearch\n/] Or we can search across all available indices using `_all`: -[source,js] +[source,console] --------------------------------------------------- GET /_all/_search?q=tag:wow --------------------------------------------------- -// CONSOLE // TEST[setup:twitter] diff --git a/docs/reference/search/suggesters.asciidoc b/docs/reference/search/suggesters.asciidoc index e9b7d8c9258..e5f715823c6 100644 --- a/docs/reference/search/suggesters.asciidoc +++ b/docs/reference/search/suggesters.asciidoc @@ -12,7 +12,7 @@ NOTE: `_suggest` endpoint has been deprecated in favour of using suggest via `_search` endpoint. In 5.0, the `_search` endpoint has been optimized for suggest only search requests. -[source,js] +[source,console] -------------------------------------------------- POST twitter/_search { @@ -31,7 +31,6 @@ POST twitter/_search } } -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] Several suggestions can be specified per request. Each suggestion is @@ -39,7 +38,7 @@ identified with an arbitrary name. In the example below two suggestions are requested. Both `my-suggest-1` and `my-suggest-2` suggestions use the `term` suggester, but have a different `text`. -[source,js] +[source,console] -------------------------------------------------- POST _search { @@ -59,7 +58,6 @@ POST _search } } -------------------------------------------------- -// CONSOLE // TEST[setup:twitter] The below suggest response example includes the suggestion response for @@ -115,7 +113,7 @@ To avoid repetition of the suggest text, it is possible to define a global text. In the example below the suggest text is defined globally and applies to the `my-suggest-1` and `my-suggest-2` suggestions. -[source,js] +[source,console] -------------------------------------------------- POST _search { @@ -134,7 +132,6 @@ POST _search } } -------------------------------------------------- -// CONSOLE The suggest text can in the above example also be specified as suggestion specific option. The suggest text specified on suggestion diff --git a/docs/reference/search/suggesters/completion-suggest.asciidoc b/docs/reference/search/suggesters/completion-suggest.asciidoc index 86a1701719d..048f27b4e34 100644 --- a/docs/reference/search/suggesters/completion-suggest.asciidoc +++ b/docs/reference/search/suggesters/completion-suggest.asciidoc @@ -24,7 +24,7 @@ but are costly to build and are stored in-memory. To use this feature, specify a special mapping for this field, which indexes the field values for fast completions. 
-[source,js] +[source,console] -------------------------------------------------- PUT music { @@ -40,7 +40,6 @@ PUT music } } -------------------------------------------------- -// CONSOLE // TESTSETUP Mapping supports the following parameters: @@ -81,7 +80,7 @@ You index suggestions like any other field. A suggestion is made of an text to be matched by a suggestion query and the `weight` determines how the suggestions will be scored. Indexing a suggestion is as follows: -[source,js] +[source,console] -------------------------------------------------- PUT music/_doc/1?refresh { @@ -91,7 +90,6 @@ PUT music/_doc/1?refresh } } -------------------------------------------------- -// CONSOLE // TEST The following parameters are supported: @@ -108,7 +106,7 @@ The following parameters are supported: You can index multiple suggestions for a document as follows: -[source,js] +[source,console] -------------------------------------------------- PUT music/_doc/1?refresh { @@ -124,20 +122,18 @@ PUT music/_doc/1?refresh ] } -------------------------------------------------- -// CONSOLE // TEST[continued] You can use the following shorthand form. Note that you can not specify a weight with suggestion(s) in the shorthand form. -[source,js] +[source,console] -------------------------------------------------- PUT music/_doc/1?refresh { "suggest" : [ "Nevermind", "Nirvana" ] } -------------------------------------------------- -// CONSOLE // TEST[continued] [[querying]] @@ -148,7 +144,7 @@ type as `completion`. Suggestions are near real-time, which means new suggestions can be made visible by <> and documents once deleted are never shown. This request: -[source,js] +[source,console] -------------------------------------------------- POST music/_search?pretty { @@ -162,7 +158,6 @@ POST music/_search?pretty } } -------------------------------------------------- -// CONSOLE // TEST[continued] <1> Prefix used to search for suggestions @@ -218,7 +213,7 @@ using <> to minimize `_source` size. Note that the _suggest endpoint doesn't support source filtering but using suggest on the `_search` endpoint does: -[source,js] +[source,console] -------------------------------------------------- POST music/_search { @@ -234,7 +229,6 @@ POST music/_search } } -------------------------------------------------- -// CONSOLE // TEST[continued] <1> Filter the source to return only the `suggest` field @@ -310,7 +304,7 @@ Queries can return duplicate suggestions coming from different documents. It is possible to modify this behavior by setting `skip_duplicates` to true. When set, this option filters out documents with duplicate suggestions from the result. -[source,js] +[source,console] -------------------------------------------------- POST music/_search?pretty { @@ -325,7 +319,6 @@ POST music/_search?pretty } } -------------------------------------------------- -// CONSOLE WARNING: When set to true, this option can slow down search because more suggestions need to be visited to find the top N. @@ -336,7 +329,7 @@ need to be visited to find the top N. The completion suggester also supports fuzzy queries -- this means you can have a typo in your search and still get results back. -[source,js] +[source,console] -------------------------------------------------- POST music/_search?pretty { @@ -353,7 +346,6 @@ POST music/_search?pretty } } -------------------------------------------------- -// CONSOLE Suggestions that share the longest prefix to the query `prefix` will be scored higher. 
@@ -395,7 +387,7 @@ NOTE: If you want to stick with the default values, but The completion suggester also supports regex queries meaning you can express a prefix as a regular expression -[source,js] +[source,console] -------------------------------------------------- POST music/_search?pretty { @@ -409,7 +401,6 @@ POST music/_search?pretty } } -------------------------------------------------- -// CONSOLE The regex query can take specific regex parameters. The following parameters are supported: diff --git a/docs/reference/search/suggesters/context-suggest.asciidoc b/docs/reference/search/suggesters/context-suggest.asciidoc index de1774ded45..b8f6d2b8b18 100644 --- a/docs/reference/search/suggesters/context-suggest.asciidoc +++ b/docs/reference/search/suggesters/context-suggest.asciidoc @@ -19,7 +19,7 @@ NOTE: It is mandatory to provide a context when indexing and querying The following defines types, each with two context mappings for a completion field: -[source,js] +[source,console] -------------------------------------------------- PUT place { @@ -69,8 +69,8 @@ PUT place_path_category } } -------------------------------------------------- -// CONSOLE // TESTSETUP + <1> Defines a `category` context named 'place_type' where the categories must be sent with the suggestions. <2> Defines a `geo` context named 'location' where the categories must be sent @@ -94,7 +94,7 @@ The mappings are set up like the `place_type` fields above. If `path` is defined then the categories are read from that path in the document, otherwise they must be sent in the suggest field like this: -[source,js] +[source,console] -------------------------------------------------- PUT place/_doc/1 { @@ -106,13 +106,13 @@ PUT place/_doc/1 } } -------------------------------------------------- -// CONSOLE + <1> These suggestions will be associated with 'cafe' and 'food' category. If the mapping had a `path` then the following index request would be enough to add the categories: -[source,js] +[source,console] -------------------------------------------------- PUT place_path_category/_doc/1 { @@ -120,7 +120,7 @@ PUT place_path_category/_doc/1 "cat": ["cafe", "food"] <1> } -------------------------------------------------- -// CONSOLE + <1> These suggestions will be associated with 'cafe' and 'food' category. NOTE: If context mapping references another field and the categories @@ -134,7 +134,7 @@ of categories. Suggestions can be filtered by one or more categories. The following filters suggestions by multiple categories: -[source,js] +[source,console] -------------------------------------------------- POST place/_search?pretty { @@ -152,7 +152,6 @@ POST place/_search?pretty } } -------------------------------------------------- -// CONSOLE // TEST[continued] NOTE: If multiple categories or category contexts are set on the query @@ -163,7 +162,7 @@ Suggestions with certain categories can be boosted higher than others. The following filters suggestions by categories and additionally boosts suggestions associated with some categories: -[source,js] +[source,console] -------------------------------------------------- POST place/_search?pretty { @@ -184,8 +183,8 @@ POST place/_search?pretty } } -------------------------------------------------- -// CONSOLE // TEST[continued] + <1> The context query filter suggestions associated with categories 'cafe' and 'restaurants' and boosts the suggestions associated with 'restaurants' by a @@ -248,7 +247,7 @@ document via the `path` parameter, similar to `category` contexts. 
Associating multiple geo location contexts with a suggestion will index the suggestion for every geo location. The following indexes a suggestion with two geo location contexts: -[source,js] +[source,console] -------------------------------------------------- PUT place/_doc/1 { @@ -269,7 +268,6 @@ PUT place/_doc/1 } } -------------------------------------------------- -// CONSOLE [float] ====== Geo location Query @@ -278,7 +276,7 @@ Suggestions can be filtered and boosted with respect to how close they are to one or more geo points. The following filters suggestions that fall within the area represented by the encoded geohash of a geo point: -[source,js] +[source,console] -------------------------------------------------- POST place/_search { @@ -299,7 +297,6 @@ POST place/_search } } -------------------------------------------------- -// CONSOLE // TEST[continued] NOTE: When a location with a lower precision at query time is specified, all suggestions @@ -312,7 +309,7 @@ if they contain at least one of the provided context values. Suggestions that are within an area represented by a geohash can also be boosted higher than others, as shown by the following: -[source,js] +[source,console] -------------------------------------------------- POST place/_search?pretty { @@ -343,8 +340,8 @@ POST place/_search?pretty } } -------------------------------------------------- -// CONSOLE // TEST[continued] + <1> The context query filters for suggestions that fall under the geo location represented by a geohash of '(43.662, -79.380)' with a precision of '2' and boosts suggestions diff --git a/docs/reference/search/suggesters/term-suggest.asciidoc b/docs/reference/search/suggesters/term-suggest.asciidoc index 3fe3ec8be4e..43d91ce164f 100644 --- a/docs/reference/search/suggesters/term-suggest.asciidoc +++ b/docs/reference/search/suggesters/term-suggest.asciidoc @@ -53,9 +53,6 @@ doesn't take the query into account that is part of request. ===== Other term suggest options: [horizontal] -`lowercase_terms`:: - Lowercases the suggest text terms after text analysis. - `max_edits`:: The maximum edit distance candidate suggestions can have in order to be considered as a suggestion. Can only be a value diff --git a/docs/reference/search/validate.asciidoc b/docs/reference/search/validate.asciidoc index 1b77fde008c..9ec81279f3d 100644 --- a/docs/reference/search/validate.asciidoc +++ b/docs/reference/search/validate.asciidoc @@ -1,8 +1,73 @@ [[search-validate]] === Validate API -The validate API allows a user to validate a potentially expensive query -without executing it. We'll use the following test data to explain _validate: +Validates a potentially expensive query without executing it. + +[source,console] +-------------------------------------------------- +GET twitter/_validate/query?q=user:foo +-------------------------------------------------- +// TEST[setup:twitter] + + +[[search-validate-api-request]] +==== {api-request-title} + +`GET /<index>/_validate/<query>` + + +[[search-validate-api-desc]] +==== {api-description-title} + +The validate API allows you to validate a potentially expensive query +without executing it. The query can be sent either as a path parameter or in the +request body.
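For instance, a minimal sketch of the request-body form (reusing the `twitter` index and `user` field from the examples in this document) looks like this:

[source,console]
--------------------------------------------------
GET twitter/_validate/query
{
  "query" : {
    "term" : { "user" : "kimchy" }
  }
}
--------------------------------------------------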
+ + +[[search-validate-api-path-params]] +==== {api-path-parms-title} + +include::{docdir}/rest-api/common-parms.asciidoc[tag=index] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=query] + + +[[search-validate-api-query-params]] +==== {api-query-parms-title} + +`all_shards`:: + (Optional, boolean) If `true`, the validation is executed on all shards + instead of one random shard per index. Defaults to `false`. + +include::{docdir}/rest-api/common-parms.asciidoc[tag=allow-no-indices] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=analyzer] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=analyze_wildcard] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=default_operator] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=df] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=expand-wildcards] + +`explain`:: + (Optional, boolean) If `true`, the response returns detailed information if an + error has occurred. Defaults to `false`. + +include::{docdir}/rest-api/common-parms.asciidoc[tag=index-ignore-unavailable] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=lenient] + +`rewrite`:: + (Optional, boolean) If `true`, returns a more detailed explanation showing the + actual Lucene query that will be executed. Defaults to `false`. + +include::{docdir}/rest-api/common-parms.asciidoc[tag=search-q] + + +[[search-validate-api-example]] +==== {api-examples-title} [source,console] -------------------------------------------------- @@ -12,7 +77,7 @@ PUT twitter/_bulk?refresh {"index":{"_id":2}} {"user" : "kimchi", "post_date" : "2009-11-15T14:12:13", "message" : "My username is similar to @kimchy!"} -------------------------------------------------- -// TESTSETUP + When sent a valid query: @@ -20,6 +85,8 @@ When sent a valid query: -------------------------------------------------- GET twitter/_validate/query?q=user:foo -------------------------------------------------- +// TEST[continued] + The response contains `valid:true`: @@ -29,31 +96,6 @@ The response contains `valid:true`: -------------------------------------------------- -[float] -=== Request Parameters - -When executing exists using the query parameter `q`, the query passed is -a query string using Lucene query parser. There are additional -parameters that can be passed: - -[cols="<,<",options="header",] -|======================================================================= -|Name |Description -|`df` |The default field to use when no field prefix is defined within the -query. - -|`analyzer` |The analyzer name to be used when analyzing the query string. - -|`default_operator` |The default operator to be used, can be `AND` or -`OR`. Defaults to `OR`. - -|`lenient` |If set to true will cause format based failures (like -providing text to a numeric field) to be ignored. Defaults to false. - -|`analyze_wildcard` |Should wildcard and prefix queries be analyzed or -not. Defaults to `false`. -|======================================================================= - The query may also be sent in the request body: [source,console] @@ -74,13 +116,14 @@ GET twitter/_validate/query } } -------------------------------------------------- +// TEST[continued] NOTE: The query being sent in the body must be nested in a `query` key, same as the <> works -If the query is invalid, `valid` will be `false`.
Here the query is -invalid because Elasticsearch knows the post_date field should be a date -due to dynamic mapping, and 'foo' does not correctly parse into a date: +If the query is invalid, `valid` will be `false`. Here the query is invalid +because {es} knows the `post_date` field should be a date due to dynamic +mapping, and 'foo' does not correctly parse into a date: [source,console] -------------------------------------------------- @@ -94,14 +137,17 @@ GET twitter/_validate/query } } -------------------------------------------------- +// TEST[continued] [source,console-result] -------------------------------------------------- {"valid":false,"_shards":{"total":1,"successful":1,"failed":0}} -------------------------------------------------- -An `explain` parameter can be specified to get more detailed information -about why a query failed: +===== The explain parameter + +An `explain` parameter can be specified to get more detailed information about +why a query failed: [source,console] -------------------------------------------------- @@ -115,8 +161,10 @@ GET twitter/_validate/query?explain=true } } -------------------------------------------------- +// TEST[continued] -responds with: + +The API returns the following response: [source,console-result] -------------------------------------------------- @@ -136,11 +184,11 @@ responds with: -------------------------------------------------- // TESTRESPONSE[s/"error" : "[^\"]+"/"error": "$body.explanations.0.error"/] -When the query is valid, the explanation defaults to the string -representation of that query. With `rewrite` set to `true`, the explanation -is more detailed showing the actual Lucene query that will be executed. +===== The rewrite parameter -For More Like This: +When the query is valid, the explanation defaults to the string representation +of that query. With `rewrite` set to `true`, the explanation is more detailed +showing the actual Lucene query that will be executed. [source,console] -------------------------------------------------- @@ -158,7 +206,8 @@ GET twitter/_validate/query?rewrite=true -------------------------------------------------- // TEST[skip:the output is randomized depending on which shard we hit] -Response: + +The API returns the following response: [source,console-result] -------------------------------------------------- @@ -179,13 +228,25 @@ Response: } -------------------------------------------------- + +===== Rewrite and all_shards parameters + By default, the request is executed on a single shard only, which is randomly selected. The detailed explanation of the query may depend on which shard is being hit, and therefore may vary from one request to another. So, in case of query rewrite the `all_shards` parameter should be used to get response from all available shards. 
-For Fuzzy Queries: +//// +[source,console] +-------------------------------------------------- +PUT twitter/_bulk?refresh +{"index":{"_id":1}} +{"user" : "kimchy", "post_date" : "2009-11-15T14:12:12", "message" : "trying out Elasticsearch"} +{"index":{"_id":2}} +{"user" : "kimchi", "post_date" : "2009-11-15T14:12:13", "message" : "My username is similar to @kimchy!"} +-------------------------------------------------- +//// [source,console] -------------------------------------------------- @@ -201,8 +262,9 @@ GET twitter/_validate/query?rewrite=true&all_shards=true } } -------------------------------------------------- +// TEST[continued] -Response: +The API returns the following response: [source,console-result] -------------------------------------------------- diff --git a/docs/reference/security/index.asciidoc b/docs/reference/security/index.asciidoc new file mode 100644 index 00000000000..ed11b5916cb --- /dev/null +++ b/docs/reference/security/index.asciidoc @@ -0,0 +1,18 @@ +[[secure-cluster]] += Secure a cluster + +[partintro] +-- +The {stack-security-features} enable you to easily secure a cluster. You can +password-protect your data as well as implement more advanced security +measures such as encrypting communications, role-based access control, +IP filtering, and auditing. + +* <> +* <> + +-- + +include::overview.asciidoc[] + +include::{xes-repo-dir}/security/configuring-es.asciidoc[] diff --git a/docs/reference/security/overview.asciidoc b/docs/reference/security/overview.asciidoc new file mode 100644 index 00000000000..c06f67b9e92 --- /dev/null +++ b/docs/reference/security/overview.asciidoc @@ -0,0 +1,63 @@ +[role="xpack"] +[[elasticsearch-security]] +== Security overview +++++ +Overview +++++ + +Security protects {es} clusters by: + +* <> + with password protection, role-based access control, and IP filtering. +* <> + with SSL/TLS encryption. +* <> + so you know who's doing what to your cluster and the data it stores. + +[float] +[[preventing-unauthorized-access]] +=== Preventing unauthorized access + +To prevent unauthorized access to your {es} cluster, you must have a +way to _authenticate_ users. This simply means that you need a way to validate +that a user is who they claim to be. For example, you have to make sure only +the person named _Kelsey Andorra_ can sign in as the user `kandorra`. The +{es-security-features} provide a standalone authentication mechanism that enables +you to quickly password-protect your cluster. If you're already using LDAP, +Active Directory, or PKI to manage users in your organization, the +{security-features} are able to integrate with those systems to perform user +authentication. + +In many cases, simply authenticating users isn't enough. You also need a way to +control what data users have access to and what tasks they can perform. The +{es-security-features} enable you to _authorize_ users by assigning access +_privileges_ to _roles_ and assigning those roles to users. For example, this +role-based access control mechanism (a.k.a RBAC) enables you to specify that the +user `kandorra` can only perform read operations on the `events` index and can't +do anything at all with other indices. + +The {security-features} also support IP-based authorization. +You can whitelist and blacklist specific IP addresses or subnets to control +network-level access to a server. + +[float] +[[preserving-data-integrity]] +=== Preserving data integrity + +A critical part of security is keeping confidential data confidential. 
+{es} has built-in protections against accidental data loss and +corruption. However, there's nothing to stop deliberate tampering or data +interception. The {stack-security-features} preserve the integrity of your +data by encrypting communications to and from nodes. For even +greater protection, you can increase the <>. + +[float] +[[maintaining-audit-trail]] +=== Maintaining an audit trail + +Keeping a system secure takes vigilance. By using {stack-security-features} to +maintain an audit trail, you can easily see who is accessing your cluster and +what they're doing. By analyzing access patterns and failed attempts to access +your cluster, you can gain insights into attempted attacks and data breaches. +Keeping an auditable log of the activity in your cluster can also help diagnose +operational issues. diff --git a/docs/reference/settings/data-frames-settings.asciidoc b/docs/reference/settings/data-frames-settings.asciidoc index e550063f7b4..bd212a4fbb2 100644 --- a/docs/reference/settings/data-frames-settings.asciidoc +++ b/docs/reference/settings/data-frames-settings.asciidoc @@ -1,13 +1,13 @@ [role="xpack"] [[data-frames-settings]] -=== {dataframe-transforms-cap} settings in Elasticsearch +=== {transforms-cap} settings in Elasticsearch [subs="attributes"] ++++ -{dataframe-transforms-cap} settings +{transforms-cap} settings ++++ -You do not need to configure any settings to use {dataframe-transforms}. It is enabled by default. +You do not need to configure any settings to use {transforms}. It is enabled by default. All of these settings can be added to the `elasticsearch.yml` configuration file. The dynamic settings can also be updated across a cluster with the @@ -18,23 +18,23 @@ file. [float] [[general-data-frames-settings]] -==== General {dataframe-transforms} settings +==== General {transforms} settings -`xpack.data_frame.enabled`:: -Set to `true` (default) to enable {dataframe-transforms} on the node. + +`xpack.transform.enabled`:: +Set to `true` (default) to enable {transforms} on the node. + + -If set to `false` in `elasticsearch.yml`, the {dataframe-transform} APIs are disabled on the node. -Therefore the node cannot start or administrate transforms or receive transport (internal) -communication requests related to {dataframe-transform} APIs. +If set to `false` in `elasticsearch.yml`, the {transform} APIs are disabled on the node. +Therefore the node cannot start or administrate {transform} or receive transport (internal) +communication requests related to {transform} APIs. + -IMPORTANT: If you want to use {dataframe-transform} features in your cluster, you must have -`xpack.data_frame.enabled` set to `true` on all master-eligible nodes. This is the +IMPORTANT: If you want to use {transform} features in your cluster, you must have +`xpack.transform.enabled` set to `true` on all master-eligible nodes. This is the default behavior. -`xpack.data_frame.num_transform_failure_retries` (<>):: -The number of times that a {dataframe-transform} retries when it experiences a -non-fatal error. Once the number of retries is exhausted, the {dataframe-transform} +`xpack.transform.num_transform_failure_retries` (<>):: +The number of times that a {transform} retries when it experiences a +non-fatal error. Once the number of retries is exhausted, the {transform} task will be marked as `failed`. The default value is `10` with a valid minimum of `0` and maximum of `100`. 
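Because this setting is dynamic, it can also be updated at runtime through the cluster update settings API; the following is a minimal sketch (the value `5` is arbitrary):

[source,console]
--------------------------------------------------
PUT _cluster/settings
{
  "persistent": {
    "xpack.transform.num_transform_failure_retries": 5
  }
}
--------------------------------------------------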
-If a {dataframe-transform} is already running, it will have to be restarted +If a {transform} is already running, it will have to be restarted to use the changed setting. diff --git a/docs/reference/settings/ilm-settings.asciidoc b/docs/reference/settings/ilm-settings.asciidoc index 0f0d94cedc2..80c20b59b2b 100644 --- a/docs/reference/settings/ilm-settings.asciidoc +++ b/docs/reference/settings/ilm-settings.asciidoc @@ -2,6 +2,15 @@ [[ilm-settings]] === {ilm-cap} settings +These are the settings available for configuring Index Lifecycle Management. + +==== Cluster level settings + +`xpack.ilm.enabled`:: +Whether ILM is enabled or disabled. Setting this to `false` disables any +ILM REST API endpoints and functionality. Defaults to `true`. + +==== Index level settings These index-level {ilm-init} settings are typically configured through index templates. For more information, see <>. @@ -17,3 +26,8 @@ information about rollover, see <>. `indices.lifecycle.poll_interval`:: (<>) How often {ilm} checks for indices that meet policy criteria. Defaults to `10m`. + +`index.lifecycle.origination_date`:: +The timestamp that will be used to calculate the index age for its phase +transitions. This allows users to create an index containing old data and +use the original creation date of the old data to calculate the index age. diff --git a/docs/reference/settings/security-settings.asciidoc b/docs/reference/settings/security-settings.asciidoc index a6672aa417a..e9bf5c9cfdf 100644 --- a/docs/reference/settings/security-settings.asciidoc +++ b/docs/reference/settings/security-settings.asciidoc @@ -212,7 +212,7 @@ information, see {stack-ov}/setting-up-authentication.html[Setting up authentica ===== Settings valid for all realms `type`:: -The type of the realm: `native, `ldap`, `active_directory`, `pki`, or `file`. Required. +The type of the realm: `native`, `ldap`, `active_directory`, `pki`, or `file`. Required. `order`:: The priority of the realm within the realm chain. Realms with a lower order are diff --git a/docs/reference/sql/endpoints/client-apps/squirrel.asciidoc b/docs/reference/sql/endpoints/client-apps/squirrel.asciidoc index bea4644cb91..a050f616b8b 100644 --- a/docs/reference/sql/endpoints/client-apps/squirrel.asciidoc +++ b/docs/reference/sql/endpoints/client-apps/squirrel.asciidoc @@ -12,6 +12,7 @@ IMPORTANT: Elastic does not endorse, promote or provide support for this applica ==== Prerequisites +* SQuirreL SQL version 4.0.0 or higher * {es-sql} <> ==== Add {es} JDBC Driver @@ -20,22 +21,21 @@ To add the {es} JDBC driver, use *Windows* > *View Drivers* menu (or Ctrl+Shift+ image:images/sql/client-apps/squirell-1-view-drivers.png[] -This opens up the `Drivers` panel on the left. Click on the `+` sign to create a new driver: +Select the *Elasticsearch* profile from the `Drivers` panel on the left-hand side (if it is missing, check the SQuirreL SQL version or add a new entry to the list through the `+` button in the upper left corner): -image:images/sql/client-apps/squirell-2-new-driver.png[] +image:images/sql/client-apps/squirell-2-select-driver.png[] -Select the *Extra Class Path* tab and *Add* the JDBC jar. *List Drivers* to have the `Class Name` filled-in -automatically and name the connection: +Select the *Extra Class Path* tab and *Add* the JDBC jar.
Name the connection and *List Drivers* to have `Class Name` populated if it is not already filled in: image:images/sql/client-apps/squirell-3-add-driver.png[] -The driver should now appear in the list: +The driver should now appear in the list with a blue check mark next to its name: image:images/sql/client-apps/squirell-4-driver-list.png[] ==== Add an alias for {es} -Add a new connection or in SQuirelL terminology an _alias_ using the new driver. To do so, select the *Aliases* panel on the left and click the `+` sign: +Add a new connection, or in SQuirreL terminology an _alias_, using the new driver. To do so, select the *Aliases* panel on the left and click the `+` sign: image:images/sql/client-apps/squirell-5-add-alias.png[] @@ -47,6 +47,6 @@ The setup is completed. Double check it by clicking on *Test Connection*. ==== Execute SQL queries -The connection should open automatically (if it has been created before simply click on *Connect* in the *Alias* panel). SQuirelL SQL can now issue SQL commands to {es}: +The connection should open automatically (if it has been created before, simply click on *Connect* in the *Alias* panel). SQuirreL SQL can now issue SQL commands to {es}: image:images/sql/client-apps/squirell-7-data.png[] diff --git a/docs/reference/sql/functions/date-time.asciidoc b/docs/reference/sql/functions/date-time.asciidoc index 45231393521..3f515ec98e2 100644 --- a/docs/reference/sql/functions/date-time.asciidoc +++ b/docs/reference/sql/functions/date-time.asciidoc @@ -8,7 +8,7 @@ [[sql-functions-datetime-interval]] ==== Intervals -A common requirement when dealing with date/time in general revolves around +A common requirement when dealing with date/time in general revolves around the notion of `interval`, a topic that is worth exploring in the context of {es} and {es-sql}. {es} has comprehensive support for <> both inside <> and <>. @@ -248,6 +248,79 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[filterNow] Currently, using a _precision_ greater than 3 doesn't make any difference to the output of the function as the maximum number of second fractional digits returned is 3 (milliseconds). +[[sql-functions-datetime-trunc]] +==== `DATE_TRUNC` + +.Synopsis: +[source, sql] +-------------------------------------------------- +DATE_TRUNC( + string_exp, <1> + datetime_exp) <2> +-------------------------------------------------- + +*Input*: + +<1> string expression denoting the unit to which the date/datetime should be truncated +<2> date/datetime expression + +*Output*: datetime + +.Description: + +Truncate the date/datetime to the specified unit by setting all fields that are less significant than the specified +one to zero (or one, for day, day of week and month).
+ +[cols="^,^"] +|=== +2+h|Datetime truncation units + +s|unit +s|abbreviations + +| millennium | millennia +| century | centuries +| decade | decades +| year | years, yy, yyyy +| quarter | quarters, qq, q +| month | months, mm, m +| week | weeks, wk, ww +| day | days, dd, d +| hour | hours, hh +| minute | minutes, mi, n +| second | seconds, ss, s +| millisecond | milliseconds, ms +| microsecond | microseconds, mcs +| nanosecond | nanoseconds, ns +|=== + + + +[source, sql] +-------------------------------------------------- +include-tagged::{sql-specs}/docs/docs.csv-spec[truncateDateTimeMillennium] +-------------------------------------------------- + +[source, sql] +-------------------------------------------------- +include-tagged::{sql-specs}/docs/docs.csv-spec[truncateDateTimeWeek] +-------------------------------------------------- + +[source, sql] +-------------------------------------------------- +include-tagged::{sql-specs}/docs/docs.csv-spec[truncateDateTimeMinutes] +-------------------------------------------------- + +[source, sql] +-------------------------------------------------- +include-tagged::{sql-specs}/docs/docs.csv-spec[truncateDateDecades] +-------------------------------------------------- + +[source, sql] +-------------------------------------------------- +include-tagged::{sql-specs}/docs/docs.csv-spec[truncateDateQuarter] +-------------------------------------------------- + [[sql-functions-datetime-day]] ==== `DAY_OF_MONTH/DOM/DAY` diff --git a/docs/reference/sql/functions/index.asciidoc b/docs/reference/sql/functions/index.asciidoc index 248c47452ba..b36dafd024d 100644 --- a/docs/reference/sql/functions/index.asciidoc +++ b/docs/reference/sql/functions/index.asciidoc @@ -51,6 +51,7 @@ ** <> ** <> ** <> +** <> ** <> ** <> ** <> diff --git a/docs/reference/sql/language/data-types.asciidoc b/docs/reference/sql/language/data-types.asciidoc index 7c5b4db045f..ee73a1eea7c 100644 --- a/docs/reference/sql/language/data-types.asciidoc +++ b/docs/reference/sql/language/data-types.asciidoc @@ -102,7 +102,7 @@ To do that, it will search for the first `keyword` that it can find that is _not Consider the following `string` mapping: -[source, js] +[source,js] ---- { "first_name" : { diff --git a/docs/reference/transform/api-quickref.asciidoc b/docs/reference/transform/api-quickref.asciidoc new file mode 100644 index 00000000000..9d2590a1540 --- /dev/null +++ b/docs/reference/transform/api-quickref.asciidoc @@ -0,0 +1,22 @@ +[role="xpack"] +[[df-api-quickref]] +== API quick reference + +All {transform} endpoints have the following base: + +[source,js] +---- +/_data_frame/transforms/ +---- +// NOTCONSOLE + +* {ref}/put-transform.html[Create {transforms}] +* {ref}/delete-transform.html[Delete {transforms}] +* {ref}/get-transform.html[Get {transforms}] +* {ref}/get-transform-stats.html[Get {transforms} statistics] +* {ref}/preview-transform.html[Preview {transforms}] +* {ref}/start-transform.html[Start {transforms}] +* {ref}/stop-transform.html[Stop {transforms}] +* {ref}/update-transform.html[Update {transforms}] + +For the full list, see {ref}/transform-apis.html[{transform-cap} APIs]. 
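For example, listing all configured {transforms} is a plain GET on that base path (a minimal sketch; the same request appears with paging parameters in the get {transforms} API examples):

[source,console]
--------------------------------------------------
GET _data_frame/transforms
--------------------------------------------------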
diff --git a/docs/reference/transform/apis/delete-transform.asciidoc b/docs/reference/transform/apis/delete-transform.asciidoc new file mode 100644 index 00000000000..06ac3c8f5d3 --- /dev/null +++ b/docs/reference/transform/apis/delete-transform.asciidoc @@ -0,0 +1,61 @@ +[role="xpack"] +[testenv="basic"] +[[delete-transform]] +=== Delete {transforms} API + +[subs="attributes"] +++++ +Delete {transforms} +++++ + +Deletes an existing {transform}. + +beta[] + +[[delete-transform-request]] +==== {api-request-title} + +`DELETE _data_frame/transforms/` + +[[delete-transform-prereqs]] +==== {api-prereq-title} + +* Before you can delete the {transform}, you must stop it. +* If the {es} {security-features} are enabled, you must have +`manage_data_frame_transforms` cluster privileges to use this API. The built-in +`data_frame_transforms_admin` role has these privileges. For more information, +see {stack-ov}/security-privileges.html[Security privileges] and +{stack-ov}/built-in-roles.html[Built-in roles]. + + +[[delete-transform-path-parms]] +==== {api-path-parms-title} + +``:: + (Required, string) Identifier for the {transform}. + +[[delete-transform-query-parms]] +==== {api-query-parms-title} + +`force`:: +(Optional, boolean) When `true`, the {transform} is deleted regardless of its +current state. The default value is `false`, meaning that the {transform} must be +`stopped` before it can be deleted. + +[[delete-transform-examples]] +==== {api-examples-title} + +[source,console] +-------------------------------------------------- +DELETE _data_frame/transforms/ecommerce_transform +-------------------------------------------------- +// TEST[skip:setup kibana sample data] + +When the {transform} is deleted, you receive the following results: + +[source,console-result] +---- +{ + "acknowledged" : true +} +---- diff --git a/docs/reference/data-frames/apis/get-transform-stats.asciidoc b/docs/reference/transform/apis/get-transform-stats.asciidoc similarity index 68% rename from docs/reference/data-frames/apis/get-transform-stats.asciidoc rename to docs/reference/transform/apis/get-transform-stats.asciidoc index 9b558b6b3c2..b4b485b52c5 100644 --- a/docs/reference/data-frames/apis/get-transform-stats.asciidoc +++ b/docs/reference/transform/apis/get-transform-stats.asciidoc @@ -1,24 +1,24 @@ [role="xpack"] [testenv="basic"] -[[get-data-frame-transform-stats]] -=== Get {dataframe-transform} statistics API +[[get-transform-stats]] +=== Get {transform} statistics API [subs="attributes"] ++++ -Get {dataframe-transform} statistics +Get {transform} statistics ++++ -Retrieves usage information for {dataframe-transforms}. +Retrieves usage information for {transforms}. beta[] -[[get-data-frame-transform-stats-request]] +[[get-transform-stats-request]] ==== {api-request-title} -`GET _data_frame/transforms//_stats` +`GET _data_frame/transforms//_stats` -`GET _data_frame/transforms/,/_stats` + +`GET _data_frame/transforms/,/_stats` + `GET _data_frame/transforms/_stats` + @@ -27,7 +27,7 @@ beta[] `GET _data_frame/transforms/*/_stats` + -[[get-data-frame-transform-stats-prereqs]] +[[get-transform-stats-prereqs]] ==== {api-prereq-title} * If the {es} {security-features} are enabled, you must have @@ -37,34 +37,34 @@ see {stack-ov}/security-privileges.html[Security privileges] and {stack-ov}/built-in-roles.html[Built-in roles]. 
-[[get-data-frame-transform-stats-desc]] +[[get-transform-stats-desc]] ==== {api-description-title} -You can get statistics for multiple {dataframe-transforms} in a single API +You can get statistics for multiple {transforms} in a single API request by using a comma-separated list of identifiers or a wildcard expression. -You can get statistics for all {dataframe-transforms} by using `_all`, by -specifying `*` as the ``, or by omitting the -``. +You can get statistics for all {transforms} by using `_all`, by +specifying `*` as the ``, or by omitting the +``. -[[get-data-frame-transform-stats-path-parms]] +[[get-transform-stats-path-parms]] ==== {api-path-parms-title} -``:: - (Optional, string) Identifier for the {dataframe-transform}. It can be a - {dataframe-transform} identifier or a wildcard expression. If you do not +``:: + (Optional, string) Identifier for the {transform}. It can be a + {transform} identifier or a wildcard expression. If you do not specify one of these options, the API returns information for all - {dataframe-transforms}. + {transforms}. -[[get-data-frame-transform-stats-query-parms]] +[[get-transform-stats-query-parms]] ==== {api-query-parms-title} `allow_no_match`:: (Optional, boolean) Specifies what to do when the request: + -- -* Contains wildcard expressions and there are no {dataframe-transforms} that match. +* Contains wildcard expressions and there are no {transforms} that match. * Contains the `_all` string or no identifiers and there are no matches. * Contains wildcard expressions and there are only partial matches. @@ -75,47 +75,45 @@ are no matches or only partial matches. -- `from`:: - (Optional, integer) Skips the specified number of {dataframe-transforms}. The + (Optional, integer) Skips the specified number of {transforms}. The default value is `0`. `size`:: - (Optional, integer) Specifies the maximum number of {dataframe-transforms} to obtain. The default value is `100`. + (Optional, integer) Specifies the maximum number of {transforms} to obtain. The default value is `100`. -[[get-data-frame-transform-stats-response]] +[[get-transform-stats-response]] ==== {api-response-body-title} `transforms`:: - (array) An array of statistics objects for {dataframe-transforms}, which are + (array) An array of statistics objects for {transforms}, which are sorted by the `id` value in ascending order. -[[get-data-frame-transform-stats-response-codes]] +[[get-transform-stats-response-codes]] ==== {api-response-codes-title} `404` (Missing resources):: If `allow_no_match` is `false`, this code indicates that there are no resources that match the request or only partial matches for the request. 
-[[get-data-frame-transform-stats-example]] +[[get-transform-stats-example]] ==== Examples -The following example skips for the first five {dataframe-transforms} and +The following example skips for the first five {transforms} and gets usage information for a maximum of ten results: -[source,js] +[source,console] -------------------------------------------------- GET _data_frame/transforms/_stats?from=5&size=10 -------------------------------------------------- -// CONSOLE // TEST[skip:todo] The following example gets usage information for the `ecommerce_transform` -{dataframe-transform}: +{transform}: -[source,js] +[source,console] -------------------------------------------------- GET _data_frame/transforms/ecommerce_transform/_stats -------------------------------------------------- -// CONSOLE // TEST[skip:todo] The API returns the following results: diff --git a/docs/reference/data-frames/apis/get-transform.asciidoc b/docs/reference/transform/apis/get-transform.asciidoc similarity index 65% rename from docs/reference/data-frames/apis/get-transform.asciidoc rename to docs/reference/transform/apis/get-transform.asciidoc index 27ccfcfe0a7..63b272ad7fb 100644 --- a/docs/reference/data-frames/apis/get-transform.asciidoc +++ b/docs/reference/transform/apis/get-transform.asciidoc @@ -1,23 +1,23 @@ [role="xpack"] [testenv="basic"] -[[get-data-frame-transform]] -=== Get {dataframe-transforms} API +[[get-transform]] +=== Get {transforms} API [subs="attributes"] ++++ -Get {dataframe-transforms} +Get {transforms} ++++ -Retrieves configuration information for {dataframe-transforms}. +Retrieves configuration information for {transforms}. beta[] -[[get-data-frame-transform-request]] +[[get-transform-request]] ==== {api-request-title} -`GET _data_frame/transforms/` + +`GET _data_frame/transforms/` + -`GET _data_frame/transforms/,` + +`GET _data_frame/transforms/,` + `GET _data_frame/transforms/` + @@ -25,7 +25,7 @@ beta[] `GET _data_frame/transforms/*` -[[get-data-frame-transform-prereqs]] +[[get-transform-prereqs]] ==== {api-prereq-title} * If the {es} {security-features} are enabled, you must have @@ -34,32 +34,31 @@ beta[] see {stack-ov}/security-privileges.html[Security privileges] and {stack-ov}/built-in-roles.html[Built-in roles]. -[[get-data-frame-transform-desc]] +[[get-transform-desc]] ==== {api-description-title} -You can get information for multiple {dataframe-transforms} in a single API +You can get information for multiple {transforms} in a single API request by using a comma-separated list of identifiers or a wildcard expression. -You can get information for all {dataframe-transforms} by using `_all`, by -specifying `*` as the ``, or by omitting the -``. +You can get information for all {transforms} by using `_all`, by +specifying `*` as the ``, or by omitting the ``. -[[get-data-frame-transform-path-parms]] +[[get-transform-path-parms]] ==== {api-path-parms-title} -``:: - (Optional, string) Identifier for the {dataframe-transform}. It can be a - {dataframe-transform} identifier or a wildcard expression. If you do not +``:: + (Optional, string) Identifier for the {transform}. It can be a + {transform} identifier or a wildcard expression. If you do not specify one of these options, the API returns information for all - {dataframe-transforms}. + {transforms}. 
-[[get-data-frame-transform-query-parms]] +[[get-transform-query-parms]] ==== {api-query-parms-title} `allow_no_match`:: (Optional, boolean) Specifies what to do when the request: + -- -* Contains wildcard expressions and there are no {dataframe-transforms} that match. +* Contains wildcard expressions and there are no {transforms} that match. * Contains the `_all` string or no identifiers and there are no matches. * Contains wildcard expressions and there are only partial matches. @@ -70,46 +69,44 @@ are no matches or only partial matches. -- `from`:: - (Optional, integer) Skips the specified number of {dataframe-transforms}. The + (Optional, integer) Skips the specified number of {transforms}. The default value is `0`. `size`:: - (Optional, integer) Specifies the maximum number of {dataframe-transforms} to obtain. The default value is `100`. + (Optional, integer) Specifies the maximum number of {transforms} to obtain. The default value is `100`. -[[get-data-frame-transform-response]] +[[get-transform-response]] ==== {api-response-body-title} `transforms`:: - (array) An array of transform resources, which are sorted by the `id` value in - ascending order. See <>. + (array) An array of {transform} resources, which are sorted by the `id` value in + ascending order. See <>. -[[get-data-frame-transform-response-codes]] +[[get-transform-response-codes]] ==== {api-response-codes-title} `404` (Missing resources):: If `allow_no_match` is `false`, this code indicates that there are no resources that match the request or only partial matches for the request. -[[get-data-frame-transform-example]] +[[get-transform-example]] ==== {api-examples-title} -The following example retrieves information about a maximum of ten transforms: +The following example retrieves information about a maximum of ten {transforms}: -[source,js] +[source,console] -------------------------------------------------- GET _data_frame/transforms?size=10 -------------------------------------------------- -// CONSOLE // TEST[skip:setup kibana sample data] The following example gets configuration information for the -`ecommerce_transform` {dataframe-transform}: +`ecommerce_transform` {transform}: -[source,js] +[source,console] -------------------------------------------------- GET _data_frame/transforms/ecommerce_transform -------------------------------------------------- -// CONSOLE // TEST[skip:setup kibana sample data] The API returns the following results: diff --git a/docs/reference/transform/apis/index.asciidoc b/docs/reference/transform/apis/index.asciidoc new file mode 100644 index 00000000000..e496401d340 --- /dev/null +++ b/docs/reference/transform/apis/index.asciidoc @@ -0,0 +1,31 @@ +[role="xpack"] +[testenv="basic"] +[[transform-apis]] +== {transform-cap} APIs + +See also {stack-ov}/ml-dataframes.html[{transforms-cap}]. 
+ +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> + +//CREATE +include::put-transform.asciidoc[] +//UPDATE +include::update-transform.asciidoc[] +//DELETE +include::delete-transform.asciidoc[] +//GET +include::get-transform.asciidoc[] +include::get-transform-stats.asciidoc[] +//PREVIEW +include::preview-transform.asciidoc[] +//START +include::start-transform.asciidoc[] +//STOP +include::stop-transform.asciidoc[] diff --git a/docs/reference/data-frames/apis/preview-transform.asciidoc b/docs/reference/transform/apis/preview-transform.asciidoc similarity index 78% rename from docs/reference/data-frames/apis/preview-transform.asciidoc rename to docs/reference/transform/apis/preview-transform.asciidoc index c0b0f6ad88a..e86f6c42bbb 100644 --- a/docs/reference/data-frames/apis/preview-transform.asciidoc +++ b/docs/reference/transform/apis/preview-transform.asciidoc @@ -1,42 +1,42 @@ [role="xpack"] [testenv="basic"] -[[preview-data-frame-transform]] -=== Preview {dataframe-transforms} API +[[preview-transform]] +=== Preview {transforms} API [subs="attributes"] ++++ -Preview {dataframe-transforms} +Preview {transforms} ++++ -Previews a {dataframe-transform}. +Previews a {transform}. beta[] -[[preview-data-frame-transform-request]] +[[preview-transform-request]] ==== {api-request-title} `POST _data_frame/transforms/_preview` -[[preview-data-frame-transform-prereq]] +[[preview-transform-prereq]] ==== {api-prereq-title} * If the {es} {security-features} are enabled, you must have `manage_data_frame_transforms` cluster privileges to use this API. The built-in `data_frame_transforms_admin` role has these privileges. You must also have `read` and `view_index_metadata` privileges on the source index for the -{dataframe-transform}. For more information, see +{transform}. For more information, see {stack-ov}/security-privileges.html[Security privileges] and {stack-ov}/built-in-roles.html[Built-in roles]. -[[preview-data-frame-transform-desc]] +[[preview-transform-desc]] ==== {api-description-title} This API generates a preview of the results that you will get when you run the -<> with the same +<> with the same configuration. It returns a maximum of 100 results. The calculations are based on all the current data in the source index. -[[preview-data-frame-transform-request-body]] +[[preview-transform-request-body]] ==== {api-request-body-title} `source`:: @@ -45,7 +45,7 @@ on all the current data in the source index. `index`::: (Required, string or array) The _source indices_ for the - {dataframe-transform}. It can be a single index, an index pattern (for + {transform}. It can be a single index, an index pattern (for example, `"myindex*"`), or an array of indices (for example, `["index1", "index2"]`). @@ -55,19 +55,19 @@ on all the current data in the source index. `pivot`:: (Required, object) Defines the pivot function `group by` fields and the - aggregation to reduce the data. See <>. + aggregation to reduce the data. See <>. -[[preview-data-frame-transform-response]] +[[preview-transform-response]] ==== {api-response-body-title} `preview`:: (array) An array of documents. In particular, they are the JSON representation of the documents that would be created in the destination index - by the {dataframe-transform}. + by the {transform}. 
==== {api-examples-title} -[source,js] +[source,console] -------------------------------------------------- POST _data_frame/transforms/_preview { @@ -92,7 +92,6 @@ POST _data_frame/transforms/_preview } } -------------------------------------------------- -// CONSOLE // TEST[skip:set up sample data] The data that is returned for this example is as follows: diff --git a/docs/reference/data-frames/apis/put-transform.asciidoc b/docs/reference/transform/apis/put-transform.asciidoc similarity index 73% rename from docs/reference/data-frames/apis/put-transform.asciidoc rename to docs/reference/transform/apis/put-transform.asciidoc index f9e622488bc..49c3d7981dd 100644 --- a/docs/reference/data-frames/apis/put-transform.asciidoc +++ b/docs/reference/transform/apis/put-transform.asciidoc @@ -1,23 +1,23 @@ [role="xpack"] [testenv="basic"] -[[put-data-frame-transform]] -=== Create {dataframe-transforms} API +[[put-transform]] +=== Create {transforms} API [subs="attributes"] ++++ -Create {dataframe-transforms} +Create {transforms} ++++ -Instantiates a {dataframe-transform}. +Instantiates a {transform}. beta[] -[[put-data-frame-transform-request]] +[[put-transform-request]] ==== {api-request-title} -`PUT _data_frame/transforms/` +`PUT _data_frame/transforms/` -[[put-data-frame-transform-prereqs]] +[[put-transform-prereqs]] ==== {api-prereq-title} * If the {es} {security-features} are enabled, you must have @@ -28,79 +28,79 @@ have `read` and `view_index_metadata` privileges on the source index and `read`, information, see {stack-ov}/security-privileges.html[Security privileges] and {stack-ov}/built-in-roles.html[Built-in roles]. -[[put-data-frame-transform-desc]] +[[put-transform-desc]] ==== {api-description-title} -This API defines a {dataframe-transform}, which copies data from source indices, +This API defines a {transform}, which copies data from source indices, transforms it, and persists it into an entity-centric destination index. The entities are defined by the set of `group_by` fields in the `pivot` object. You can also think of the destination index as a two-dimensional tabular data structure (known as a {dataframe}). The ID for each document in the {dataframe} is generated from a hash of the entity, so there is a unique row per entity. For more information, see -{stack-ov}/ml-dataframes.html[{dataframe-transforms-cap}]. +{stack-ov}/ml-dataframes.html[{transforms-cap}]. -When the {dataframe-transform} is created, a series of validations occur to +When the {transform} is created, a series of validations occur to ensure its success. For example, there is a check for the existence of the source indices and a check that the destination index is not part of the source index pattern. You can use the `defer_validation` parameter to skip these checks. -Deferred validations are always run when the {dataframe-transform} is started, +Deferred validations are always run when the {transform} is started, with the exception of privilege checks. When {es} {security-features} are -enabled, the {dataframe-transform} remembers which roles the user that created +enabled, the {transform} remembers which roles the user that created it had at the time of creation and uses those same roles. If those roles do not have the required privileges on the source and destination indices, the -{dataframe-transform} fails when it attempts unauthorized operations. +{transform} fails when it attempts unauthorized operations. -IMPORTANT: You must use {kib} or this API to create a {dataframe-transform}. 
- Do not put a {dataframe-transform} directly into any +IMPORTANT: You must use {kib} or this API to create a {transform}. + Do not put a {transform} directly into any `.data-frame-internal*` indices using the Elasticsearch index API. If {es} {security-features} are enabled, do not give users any privileges on `.data-frame-internal*` indices. -[[put-data-frame-transform-path-parms]] +[[put-transform-path-parms]] ==== {api-path-parms-title} -``:: - (Required, string) Identifier for the {dataframe-transform}. This identifier +``:: + (Required, string) Identifier for the {transform}. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. -[[put-data-frame-transform-query-parms]] +[[put-transform-query-parms]] ==== {api-query-parms-title} `defer_validation`:: (Optional, boolean) When `true`, deferrable validations are not run. This behavior may be desired if the source index does not exist until after the - {dataframe-transform} is created. + {transform} is created. -[[put-data-frame-transform-request-body]] +[[put-transform-request-body]] ==== {api-request-body-title} `description`:: - (Optional, string) Free text description of the {dataframe-transform}. + (Optional, string) Free text description of the {transform}. `dest`:: (Required, object) Required. The destination configuration, which has the following properties: `index`::: - (Required, string) The _destination index_ for the {dataframe-transform}. + (Required, string) The _destination index_ for the {transform}. `pipeline`::: (Optional, string) The unique identifier for a <>. `frequency`:: (Optional, <>) The interval between checks for changes in the source - indices when the {dataframe-transform} is running continuously. Also determines - the retry interval in the event of transient failures while the {dataframe-transform} is + indices when the {transform} is running continuously. Also determines + the retry interval in the event of transient failures while the {transform} is searching or indexing. The minimum value is `1s` and the maximum is `1h`. The default value is `1m`. `pivot`:: (Required, object) Defines the pivot function `group by` fields and the aggregation to - reduce the data. See <>. + reduce the data. See <>. `source`:: (Required, object) The source configuration, which has the following @@ -108,7 +108,7 @@ IMPORTANT: You must use {kib} or this API to create a {dataframe-transform}. `index`::: (Required, string or array) The _source indices_ for the - {dataframe-transform}. It can be a single index, an index pattern (for + {transform}. It can be a single index, an index pattern (for example, `"myindex*"`), or an array of indices (for example, `["index1", "index2"]`). @@ -119,7 +119,7 @@ IMPORTANT: You must use {kib} or this API to create a {dataframe-transform}. `sync`:: (Optional, object) Defines the properties required to run continuously. `time`::: - (Required, object) Specifies that the {dataframe-transform} uses a time + (Required, object) Specifies that the {transform} uses a time field to synchronize the source and destination indices. `field`:::: (Required, string) The date field that is used to identify new documents @@ -136,10 +136,10 @@ delays. (Optional, <>) The time delay between the current time and the latest input data time. The default value is `60s`. 
-[[put-data-frame-transform-example]] +[[put-transform-example]] ==== {api-examples-title} -[source,js] +[source,console] -------------------------------------------------- PUT _data_frame/transforms/ecommerce_transform { @@ -183,10 +183,9 @@ PUT _data_frame/transforms/ecommerce_transform } } -------------------------------------------------- -// CONSOLE // TEST[setup:kibana_sample_data_ecommerce] -When the transform is created, you receive the following results: +When the {transform} is created, you receive the following results: [source,console-result] ---- diff --git a/docs/reference/data-frames/apis/start-transform.asciidoc b/docs/reference/transform/apis/start-transform.asciidoc similarity index 56% rename from docs/reference/data-frames/apis/start-transform.asciidoc rename to docs/reference/transform/apis/start-transform.asciidoc index b20c5c87253..3480ddea86f 100644 --- a/docs/reference/data-frames/apis/start-transform.asciidoc +++ b/docs/reference/transform/apis/start-transform.asciidoc @@ -1,74 +1,73 @@ [role="xpack"] [testenv="basic"] -[[start-data-frame-transform]] -=== Start {dataframe-transforms} API +[[start-transform]] +=== Start {transforms} API [subs="attributes"] ++++ -Start {dataframe-transforms} +Start {transforms} ++++ -Starts one or more {dataframe-transforms}. +Starts one or more {transforms}. beta[] -[[start-data-frame-transform-request]] +[[start-transform-request]] ==== {api-request-title} -`POST _data_frame/transforms//_start` +`POST _data_frame/transforms//_start` -[[start-data-frame-transform-prereqs]] +[[start-transform-prereqs]] ==== {api-prereq-title} * If the {es} {security-features} are enabled, you must have `manage_data_frame_transforms` cluster privileges to use this API. You must also have `view_index_metadata` privileges on the source index for the -{dataframe-transform}. For more information, see +{transform}. For more information, see {stack-ov}/security-privileges.html[Security privileges] and {stack-ov}/built-in-roles.html[Built-in roles]. -[[start-data-frame-transform-desc]] +[[start-transform-desc]] ==== {api-description-title} -When you start a {dataframe-transform}, it creates the destination index if it +When you start a {transform}, it creates the destination index if it does not already exist. The `number_of_shards` is set to `1` and the `auto_expand_replicas` is set to `0-1`. -The transform deduces the mapping definitions from the source indices. For +The {transform} deduces the mapping definitions from the source indices. For scripted fields, it uses <>. If a field in the destination index is created by `scripted_metric` or `bucket_script` -aggregations, the transform uses dynamic mappings unless a template exists or +aggregations, the {transform} uses dynamic mappings unless a template exists or the destination index already exists. Mapping definitions in the destination index take precedence over dynamic mappings and templates. -When the {dataframe-transform} starts, a series of validations occur to ensure +When the {transform} starts, a series of validations occur to ensure its success. If you deferred validation when you created the -{dataframe-transform}, they occur when you start the transform--with the +{transform}, they occur when you start the {transform}--with the exception of privilege checks. 
When {es} {security-features} are enabled, the -{dataframe-transform} remembers which roles the user that created it had at the +{transform} remembers which roles the user that created it had at the time of creation and uses those same roles. If those roles do not have the required privileges on the source and destination indices, the -{dataframe-transform} fails when it attempts unauthorized operations. +{transform} fails when it attempts unauthorized operations. -[[start-data-frame-transform-path-parms]] +[[start-transform-path-parms]] ==== {api-path-parms-title} -``:: - (Required, string) Identifier for the {dataframe-transform}. This identifier +``:: + (Required, string) Identifier for the {transform}. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. -[[start-data-frame-transform-example]] +[[start-transform-example]] ==== {api-examples-title} -[source,js] +[source,console] -------------------------------------------------- POST _data_frame/transforms/ecommerce_transform/_start -------------------------------------------------- -// CONSOLE // TEST[skip:set up kibana samples] -When the {dataframe-transform} starts, you receive the following results: +When the {transform} starts, you receive the following results: [source,console-result] ---- diff --git a/docs/reference/data-frames/apis/stop-transform.asciidoc b/docs/reference/transform/apis/stop-transform.asciidoc similarity index 60% rename from docs/reference/data-frames/apis/stop-transform.asciidoc rename to docs/reference/transform/apis/stop-transform.asciidoc index 234a43def1d..c294186b6c3 100644 --- a/docs/reference/data-frames/apis/stop-transform.asciidoc +++ b/docs/reference/transform/apis/stop-transform.asciidoc @@ -1,27 +1,29 @@ [role="xpack"] [testenv="basic"] -[[stop-data-frame-transform]] -=== Stop {dataframe-transforms} API +[[stop-transform]] +=== Stop {transforms} API [subs="attributes"] ++++ -Stop {dataframe-transforms} +Stop {transforms} ++++ -Stops one or more {dataframe-transforms}. +Stops one or more {transforms}. beta[] -[[stop-data-frame-transform-request]] + +[[stop-transform-request]] ==== {api-request-title} -`POST _data_frame/transforms//_stop` + +`POST _data_frame/transforms//_stop` + -`POST _data_frame/transforms/,/_stop` + +`POST _data_frame/transforms/,/_stop` + `POST _data_frame/transforms/_all/_stop` -[[stop-data-frame-transform-prereq]] + +[[stop-transform-prereq]] ==== {api-prereq-title} * If the {es} {security-features} are enabled, you must have @@ -30,50 +32,58 @@ beta[] see {stack-ov}/security-privileges.html[Security privileges] and {stack-ov}/built-in-roles.html[Built-in roles]. -[[stop-data-frame-transform-desc]] + +[[stop-transform-desc]] ==== {api-description-title} -You can stop multiple {dataframe-transforms} in a single API request by using a -comma-separated list of {dataframe-transforms} or a wildcard expression. -All {dataframe-transforms} can be stopped by using `_all` or `*` as the -``. +You can stop multiple {transforms} in a single API request by using a +comma-separated list of {transforms} or a wildcard expression. +All {transforms} can be stopped by using `_all` or `*` as the +``. -[[stop-data-frame-transform-path-parms]] + +[[stop-transform-path-parms]] ==== {api-path-parms-title} -``:: - (Required, string) Identifier for the {dataframe-transform}. This identifier +``:: + (Required, string) Identifier for the {transform}. 
This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. -[[stop-data-frame-transform-query-parms]] + +[[stop-transform-query-parms]] ==== {api-query-parms-title} `allow_no_match`:: (Optional, boolean) Specifies what to do when the request: + -- -* Contains wildcard expressions and there are no {dataframe-transforms} that match. +* Contains wildcard expressions and there are no {transforms} that match. * Contains the `_all` string or no identifiers and there are no matches. * Contains wildcard expressions and there are only partial matches. The default value is `true`, which returns a successful acknowledgement message when there are no matches. When there are only partial matches, the API stops -the appropriate {dataframe-transforms}. For example, if the request contains -`test-id1*,test-id2*` as the identifiers and there are no {dataframe-transforms} -that match `test-id2*`, the API nonetheless stops the {dataframe-transforms} +the appropriate {transforms}. For example, if the request contains +`test-id1*,test-id2*` as the identifiers and there are no {transforms} +that match `test-id2*`, the API nonetheless stops the {transforms} that match `test-id1*`. If this parameter is `false`, the request returns a `404` status code when there are no matches or only partial matches. -- +`force`:: + (Optional, boolean) Set to `true` to stop a failed {transform} or to + forcefully stop a {transform} that did not respond to the initial stop + request. + `timeout`:: (Optional, time value) If `wait_for_completion=true`, the API blocks for (at - maximum) the specified duration while waiting for the transform to stop. If + maximum) the specified duration while waiting for the {transform} to stop. If more than `timeout` time has passed, the API throws a timeout exception. Even if a timeout exception is thrown, the stop request is still processing and - eventually moves the transform to `STOPPED`. The timeout simply means the API + eventually moves the {transform} to `STOPPED`. The timeout simply means the API call itself timed out while waiting for the status change. Defaults to `30s` `wait_for_completion`:: @@ -81,24 +91,25 @@ are no matches or only partial matches. state completely stops. If set to `false`, the API returns immediately and the indexer will be stopped asynchronously in the background. Defaults to `false`. -[[stop-data-frame-transform-response-codes]] + +[[stop-transform-response-codes]] ==== {api-response-codes-title} `404` (Missing resources):: If `allow_no_match` is `false`, this code indicates that there are no resources that match the request or only partial matches for the request. 
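Putting the path and query parameters above together, a hedged sketch of a single request that stops every {transform} whose identifier starts with `ecommerce_` (a hypothetical naming pattern), waits for them to reach `STOPPED`, and returns a `404` if nothing matches, could look like this:

[source,console]
--------------------------------------------------
POST _data_frame/transforms/ecommerce_*/_stop?wait_for_completion=true&timeout=60s&allow_no_match=false
--------------------------------------------------
// TEST[skip:hypothetical example]

To stop a {transform} that is in a failed state or that ignores a normal stop request, add `force=true` to the request.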
-[[stop-data-frame-transform-example]] + +[[stop-transform-example]] ==== {api-examples-title} -[source,js] +[source,console] -------------------------------------------------- POST _data_frame/transforms/ecommerce_transform/_stop -------------------------------------------------- -// CONSOLE // TEST[skip:set up kibana samples] -When the {dataframe-transform} stops, you receive the following results: +When the {transform} stops, you receive the following results: [source,console-result] ---- diff --git a/docs/reference/data-frames/apis/transformresource.asciidoc b/docs/reference/transform/apis/transformresource.asciidoc similarity index 64% rename from docs/reference/data-frames/apis/transformresource.asciidoc rename to docs/reference/transform/apis/transformresource.asciidoc index f51074bd456..55b2095a6ec 100644 --- a/docs/reference/data-frames/apis/transformresource.asciidoc +++ b/docs/reference/transform/apis/transformresource.asciidoc @@ -1,70 +1,70 @@ [role="xpack"] [testenv="basic"] -[[data-frame-transform-resource]] -=== {dataframe-transform-cap} resources +[[transform-resource]] +=== {transform-cap} resources -{dataframe-transform-cap} resources relate to the <>. +{transform-cap} resources relate to the <>. For more information, see {stack-ov}/ecommerce-dataframes.html[Transforming your data with {dataframes}]. [discrete] -[[data-frame-transform-properties]] +[[transform-properties]] ==== {api-definitions-title} `description`:: - (string) A description of the {dataframe-transform}. + (string) A description of the {transform}. `dest`:: - (object) The destination for the {dataframe-transform}. See - <>. + (object) The destination for the {transform}. See + <>. `frequency`:: (time units) The interval between checks for changes in the source indices - when the {dataframe-transform} is running continuously. Also determines the - retry interval in the event of transient failures while the {dataframe-transform} is + when the {transform} is running continuously. Also determines the + retry interval in the event of transient failures while the {transform} is searching or indexing. The minimum value is `1s` and the maximum is `1h`. The default value is `1m`. `id`:: - (string) A unique identifier for the {dataframe-transform}. + (string) A unique identifier for the {transform}. `pivot`:: (object) The method for transforming the data. See - <>. + <>. `source`:: - (object) The source of the data for the {dataframe-transform}. See - <>. + (object) The source of the data for the {transform}. See + <>. -[[data-frame-transform-dest]] +[[transform-dest]] ==== Dest objects -{dataframe-transform-cap} resources contain `dest` objects. For example, when -you create a {dataframe-transform}, you must define its destination. +{transform-cap} resources contain `dest` objects. For example, when +you create a {transform}, you must define its destination. [discrete] -[[data-frame-transform-dest-properties]] +[[transform-dest-properties]] ===== {api-definitions-title} `index`:: - (string) The _destination index_ for the {dataframe-transform}. + (string) The _destination index_ for the {transform}. `pipeline`:: (string) The unique identifier for a <>. -[[data-frame-transform-source]] +[[transform-source]] ==== Source objects -{dataframe-transform-cap} resources contain `source` objects. For example, when -you create a {dataframe-transform}, you must define its source. +{transform-cap} resources contain `source` objects. For example, when +you create a {transform}, you must define its source. 
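For orientation, here is a hedged sketch of how the `source` and `dest` objects fit together inside a {transform} configuration. The index names, query, and pipeline identifier are hypothetical:

[source,js]
--------------------------------------------------
"source": {
  "index": ["kibana_sample_data_ecommerce"],
  "query": { "term": { "currency": "EUR" } }
},
"dest": {
  "index": "ecommerce-eur-summary",
  "pipeline": "add-ingest-timestamp"
}
--------------------------------------------------
// NOTCONSOLE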
[discrete] -[[data-frame-transform-source-properties]] +[[transform-source-properties]] ===== {api-definitions-title} `index`:: - (string or array) The _source indices_ for the {dataframe-transform}. It can + (string or array) The _source indices_ for the {transform}. It can be a single index, an index pattern (for example, `"myindex*"`), or an array of indices (for example, `["index1", "index2"]`). @@ -72,14 +72,14 @@ you create a {dataframe-transform}, you must define its source. (object) A query clause that retrieves a subset of data from the source index. See <>. -[[data-frame-transform-pivot]] +[[transform-pivot]] ==== Pivot objects -{dataframe-transform-cap} resources contain `pivot` objects, which define the +{transform-cap} resources contain `pivot` objects, which define the pivot function `group by` fields and the aggregation to reduce the data. [discrete] -[[data-frame-transform-pivot-properties]] +[[transform-pivot-properties]] ===== {api-definitions-title} `aggregations` or `aggs`:: @@ -100,7 +100,7 @@ pivot function `group by` fields and the aggregation to reduce the data. * {ref}/search-aggregations-pipeline-bucket-script-aggregation.html[Bucket Script] * {ref}/search-aggregations-pipeline-bucket-selector-aggregation.html[Bucket Selector] -IMPORTANT: {dataframe-transforms-cap} support a subset of the functionality in +IMPORTANT: {transforms-cap} support a subset of the functionality in composite aggregations. See {stack-ov}/dataframe-limitations.html[{dataframe-cap} limitations]. @@ -122,8 +122,8 @@ composite aggregations. See dynamically adjusted to a lower value. The minimum value is `10` and the maximum is `10,000`. The default value is `500`. -[[data-frame-transform-example]] +[[transform-example]] ==== {api-examples-title} See the -<>. +<>. diff --git a/docs/reference/data-frames/apis/update-transform.asciidoc b/docs/reference/transform/apis/update-transform.asciidoc similarity index 74% rename from docs/reference/data-frames/apis/update-transform.asciidoc rename to docs/reference/transform/apis/update-transform.asciidoc index 0b1cda083ce..756f1a1e187 100644 --- a/docs/reference/data-frames/apis/update-transform.asciidoc +++ b/docs/reference/transform/apis/update-transform.asciidoc @@ -1,23 +1,23 @@ [role="xpack"] [testenv="basic"] -[[update-data-frame-transform]] -=== Update {dataframe-transforms} API +[[update-transform]] +=== Update {transforms} API [subs="attributes"] ++++ -Update {dataframe-transforms} +Update {transforms} ++++ -Updates an existing {dataframe-transform}. +Updates an existing {transform}. beta[] -[[update-data-frame-transform-request]] +[[update-transform-request]] ==== {api-request-title} -`POST _data_frame/transforms//_update` +`POST _data_frame/transforms//_update` -[[update-data-frame-transform-prereqs]] +[[update-transform-prereqs]] ==== {api-prereq-title} * If the {es} {security-features} are enabled, you must have @@ -28,60 +28,60 @@ have `read` and `view_index_metadata` privileges on the source index and `read`, information, see {stack-ov}/security-privileges.html[Security privileges] and {stack-ov}/built-in-roles.html[Built-in roles]. -[[update-data-frame-transform-desc]] +[[update-transform-desc]] ==== {api-description-title} -This API updates an existing {dataframe-transform}. All settings except description do not -take effect until after the {dataframe-transform} starts the next checkpoint. This is +This API updates an existing {transform}. 
All settings except description do not +take effect until after the {transform} starts the next checkpoint. This is so there is consistency with the pivoted data in each checkpoint. -IMPORTANT: When {es} {security-features} are enabled, your {dataframe-transform} +IMPORTANT: When {es} {security-features} are enabled, your {transform} remembers which roles the user who updated it had at the time of update and runs with those privileges. -IMPORTANT: You must use {kib} or this API to update a {dataframe-transform}. - Do not update a {dataframe-transform} directly via +IMPORTANT: You must use {kib} or this API to update a {transform}. + Do not update a {transform} directly via `.data-frame-internal*` indices using the Elasticsearch index API. If {es} {security-features} are enabled, do not give users any privileges on `.data-frame-internal*` indices. -[[update-data-frame-transform-path-parms]] +[[update-transform-path-parms]] ==== {api-path-parms-title} -``:: - (Required, string) Identifier for the {dataframe-transform}. This identifier +``:: + (Required, string) Identifier for the {transform}. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. -[[update-data-frame-transform-query-parms]] +[[update-transform-query-parms]] ==== {api-query-parms-title} `defer_validation`:: (Optional, boolean) When `true`, deferrable validations are not run. This behavior may be desired if the source index does not exist until after the - {dataframe-transform} is updated. + {transform} is updated. -[[update-data-frame-transform-request-body]] +[[update-transform-request-body]] ==== {api-request-body-title} `description`:: - (Optional, string) Free text description of the {dataframe-transform}. + (Optional, string) Free text description of the {transform}. `dest`:: (Optional, object) The destination configuration, which has the following properties: `index`::: - (Required, string) The _destination index_ for the {dataframe-transform}. + (Required, string) The _destination index_ for the {transform}. `pipeline`::: (Optional, string) The unique identifier for a <>. `frequency`:: (Optional, <>) The interval between checks for changes - in the source indices when the {dataframe-transform} is running continuously. + in the source indices when the {transform} is running continuously. Also determines the retry interval in the event of transient failures while - the {dataframe-transform} is searching or indexing. The minimum value is `1s` + the {transform} is searching or indexing. The minimum value is `1s` and the maximum is `1h`. The default value is `1m`. `source`:: @@ -90,7 +90,7 @@ IMPORTANT: You must use {kib} or this API to update a {dataframe-transform}. `index`::: (Required, string or array) The _source indices_ for the - {dataframe-transform}. It can be a single index, an index pattern (for + {transform}. It can be a single index, an index pattern (for example, `"myindex*"`), or an array of indices (for example, `["index1", "index2"]`). @@ -101,7 +101,7 @@ IMPORTANT: You must use {kib} or this API to update a {dataframe-transform}. `sync`:: (Optional, object) Defines the properties required to run continuously. `time`::: - (Required, object) Specifies that the {dataframe-transform} uses a time + (Required, object) Specifies that the {transform} uses a time field to synchronize the source and destination indices. 
`field`:::: (Required, string) The date field that is used to identify new documents @@ -118,10 +118,10 @@ delays. (Optional, <>) The time delay between the current time and the latest input data time. The default value is `60s`. -[[update-data-frame-transform-example]] +[[update-transform-example]] ==== {api-examples-title} -[source,js] +[source,console] -------------------------------------------------- POST _data_frame/transforms/simple-kibana-ecomm-pivot/_update { @@ -149,10 +149,9 @@ POST _data_frame/transforms/simple-kibana-ecomm-pivot/_update } } -------------------------------------------------- -// CONSOLE // TEST[setup:simple_kibana_continuous_pivot] -When the transform is updated, you receive the updated configuration: +When the {transform} is updated, you receive the updated configuration: [source,console-result] ---- diff --git a/docs/reference/transform/checkpoints.asciidoc b/docs/reference/transform/checkpoints.asciidoc new file mode 100644 index 00000000000..4c41b876b23 --- /dev/null +++ b/docs/reference/transform/checkpoints.asciidoc @@ -0,0 +1,88 @@ +[role="xpack"] +[[ml-transform-checkpoints]] +== How {transform} checkpoints work +++++ +How checkpoints work +++++ + +beta[] + +Each time a {transform} examines the source indices and creates or +updates the destination index, it generates a _checkpoint_. + +If your {transform} runs only once, there is logically only one +checkpoint. If your {transform} runs continuously, however, it creates +checkpoints as it ingests and transforms new source data. + +To create a checkpoint, the {ctransform}: + +. Checks for changes to source indices. ++ +Using a simple periodic timer, the {transform} checks for changes to +the source indices. This check is done based on the interval defined in the +transform's `frequency` property. ++ +If the source indices remain unchanged or if a checkpoint is already in progress +then it waits for the next timer. + +. Identifies which entities have changed. ++ +The {transform} searches to see which entities have changed since the +last time it checked. The `sync` configuration object in the {transform} +identifies a time field in the source indices. The {transform} uses the values +in that field to synchronize the source and destination indices. + +. Updates the destination index (the {dataframe}) with the changed entities. ++ +-- +The {transform} applies changes related to either new or changed +entities to the destination index. The set of changed entities is paginated. For +each page, the {transform} performs a composite aggregation using a +`terms` query. After all the pages of changes have been applied, the checkpoint +is complete. +-- + +This checkpoint process involves both search and indexing activity on the +cluster. We have attempted to favor control over performance while developing +{transforms}. We decided it was preferable for the +{transform} to take longer to complete, rather than to finish quickly +and take precedence in resource consumption. That being said, the cluster still +requires enough resources to support both the composite aggregation search and +the indexing of its results. + +TIP: If the cluster experiences unsuitable performance degradation due to the +{transform}, stop the {transform}. Consider whether you can apply a +source query to the {transform} to reduce the scope of data it +processes. Also consider whether the cluster has sufficient resources in place +to support both the composite aggregation search and the indexing of its +results. 
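As a concrete (but hypothetical) illustration of the `frequency` timer and the `sync` time field described above, the relevant fragment of a {ctransform} configuration might look like the following sketch, where `order_date` stands in for whatever date field marks new data in your source indices:

[source,js]
--------------------------------------------------
"frequency": "5m",
"sync": {
  "time": {
    "field": "order_date",
    "delay": "60s"
  }
}
--------------------------------------------------
// NOTCONSOLE

With these settings the {transform} checks the source indices every five minutes and only considers documents whose `order_date` is at least 60 seconds old, which allows for ingest delays.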
+ +[discrete] +[[ml-transform-checkpoint-errors]] +==== Error handling + +Failures in {transforms} tend to be related to searching or indexing. +To increase the resiliency of {transforms}, the cursor positions of +the aggregated search and the changed entities search are tracked in memory and +persisted periodically. + +Checkpoint failures can be categorized as follows: + +* Temporary failures: The checkpoint is retried. If 10 consecutive failures +occur, the {transform} has a failed status. For example, this +situation might occur when there are shard failures and queries return only +partial results. +* Irrecoverable failures: The {transform} immediately fails. For +example, this situation occurs when the source index is not found. +* Adjustment failures: The {transform} retries with adjusted settings. +For example, if a parent circuit breaker memory errors occur during the +composite aggregation, the {transform} receives partial results. The aggregated +search is retried with a smaller number of buckets. This retry is performed at +the interval defined in the `frequency` property for the {transform}. If the +search is retried to the point where it reaches a minimal number of buckets, an +irrecoverable failure occurs. + +If the node running the {transforms} fails, the {transform} restarts +from the most recent persisted cursor position. This recovery process might +repeat some of the work the {transform} had already done, but it ensures data +consistency. diff --git a/docs/reference/transform/dataframe-examples.asciidoc b/docs/reference/transform/dataframe-examples.asciidoc new file mode 100644 index 00000000000..6c03ad3ecb3 --- /dev/null +++ b/docs/reference/transform/dataframe-examples.asciidoc @@ -0,0 +1,335 @@ +[role="xpack"] +[testenv="basic"] +[[dataframe-examples]] +== {transform-cap} examples +++++ +Examples +++++ + +beta[] + +These examples demonstrate how to use {transforms} to derive useful +insights from your data. All the examples use one of the +{kibana-ref}/add-sample-data.html[{kib} sample datasets]. For a more detailed, +step-by-step example, see +<>. + +* <> +* <> +* <> +* <> + +include::ecommerce-example.asciidoc[] + +[[example-best-customers]] +=== Finding your best customers + +In this example, we use the eCommerce orders sample dataset to find the customers +who spent the most in our hypothetical webshop. Let's transform the data such +that the destination index contains the number of orders, the total price of +the orders, the amount of unique products and the average price per order, +and the total amount of ordered products for each customer. + +[source,console] +---------------------------------- +POST _data_frame/transforms/_preview +{ + "source": { + "index": "kibana_sample_data_ecommerce" + }, + "dest" : { <1> + "index" : "sample_ecommerce_orders_by_customer" + }, + "pivot": { + "group_by": { <2> + "user": { "terms": { "field": "user" }}, + "customer_id": { "terms": { "field": "customer_id" }} + }, + "aggregations": { + "order_count": { "value_count": { "field": "order_id" }}, + "total_order_amt": { "sum": { "field": "taxful_total_price" }}, + "avg_amt_per_order": { "avg": { "field": "taxful_total_price" }}, + "avg_unique_products_per_order": { "avg": { "field": "total_unique_products" }}, + "total_unique_products": { "cardinality": { "field": "products.product_id" }} + } + } +} +---------------------------------- +// TEST[skip:setup kibana sample data] + +<1> This is the destination index for the {dataframe}. It is ignored by +`_preview`. 
+<2> Two `group_by` fields have been selected. This means the {dataframe} will +contain a unique row per `user` and `customer_id` combination. Within this +dataset both these fields are unique. By including both in the {dataframe} it +gives more context to the final results. + +NOTE: In the example above, condensed JSON formatting has been used for easier +readability of the pivot object. + +The preview {transforms} API enables you to see the layout of the +{dataframe} in advance, populated with some sample values. For example: + +[source,js] +---------------------------------- +{ + "preview" : [ + { + "total_order_amt" : 3946.9765625, + "order_count" : 59.0, + "total_unique_products" : 116.0, + "avg_unique_products_per_order" : 2.0, + "customer_id" : "10", + "user" : "recip", + "avg_amt_per_order" : 66.89790783898304 + }, + ... + ] + } +---------------------------------- +// NOTCONSOLE + +This {dataframe} makes it easier to answer questions such as: + +* Which customers spend the most? + +* Which customers spend the most per order? + +* Which customers order most often? + +* Which customers ordered the least number of different products? + +It's possible to answer these questions using aggregations alone, however +{dataframes} allow us to persist this data as a customer centric index. This +enables us to analyze data at scale and gives more flexibility to explore and +navigate data from a customer centric perspective. In some cases, it can even +make creating visualizations much simpler. + +[[example-airline]] +=== Finding air carriers with the most delays + +In this example, we use the Flights sample dataset to find out which air carrier +had the most delays. First, we filter the source data such that it excludes all +the cancelled flights by using a query filter. Then we transform the data to +contain the distinct number of flights, the sum of delayed minutes, and the sum +of the flight minutes by air carrier. Finally, we use a +{ref}/search-aggregations-pipeline-bucket-script-aggregation.html[`bucket_script`] +to determine what percentage of the flight time was actually delay. + +[source,console] +---------------------------------- +POST _data_frame/transforms/_preview +{ + "source": { + "index": "kibana_sample_data_flights", + "query": { <1> + "bool": { + "filter": [ + { "term": { "Cancelled": false } } + ] + } + } + }, + "dest" : { <2> + "index" : "sample_flight_delays_by_carrier" + }, + "pivot": { + "group_by": { <3> + "carrier": { "terms": { "field": "Carrier" }} + }, + "aggregations": { + "flights_count": { "value_count": { "field": "FlightNum" }}, + "delay_mins_total": { "sum": { "field": "FlightDelayMin" }}, + "flight_mins_total": { "sum": { "field": "FlightTimeMin" }}, + "delay_time_percentage": { <4> + "bucket_script": { + "buckets_path": { + "delay_time": "delay_mins_total.value", + "flight_time": "flight_mins_total.value" + }, + "script": "(params.delay_time / params.flight_time) * 100" + } + } + } + } +} +---------------------------------- +// TEST[skip:setup kibana sample data] + +<1> Filter the source data to select only flights that were not cancelled. +<2> This is the destination index for the {dataframe}. It is ignored by +`_preview`. +<3> The data is grouped by the `Carrier` field which contains the airline name. +<4> This `bucket_script` performs calculations on the results that are returned +by the aggregation. In this particular example, it calculates what percentage of +travel time was taken up by delays. 
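If you adapt this example to other data where the summed flight time could be zero for a bucket, a defensive variant of the `bucket_script` avoids a division by zero. This is only a sketch, not part of the original example:

[source,js]
--------------------------------------------------
"delay_time_percentage": {
  "bucket_script": {
    "buckets_path": {
      "delay_time": "delay_mins_total.value",
      "flight_time": "flight_mins_total.value"
    },
    "script": "params.flight_time > 0 ? (params.delay_time / params.flight_time) * 100 : 0"
  }
}
--------------------------------------------------
// NOTCONSOLE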
+ +The preview shows you that the new index would contain data like this for each +carrier: + +[source,js] +---------------------------------- +{ + "preview" : [ + { + "carrier" : "ES-Air", + "flights_count" : 2802.0, + "flight_mins_total" : 1436927.5130677223, + "delay_time_percentage" : 9.335543983955839, + "delay_mins_total" : 134145.0 + }, + ... + ] +} +---------------------------------- +// NOTCONSOLE + +This {dataframe} makes it easier to answer questions such as: + +* Which air carrier has the most delays as a percentage of flight time? + +NOTE: This data is fictional and does not reflect actual delays +or flight stats for any of the featured destination or origin airports. + + +[[example-clientips]] +=== Finding suspicious client IPs by using scripted metrics + +With {transforms}, you can use +{ref}/search-aggregations-metrics-scripted-metric-aggregation.html[scripted +metric aggregations] on your data. These aggregations are flexible and make +it possible to perform very complex processing. Let's use scripted metrics to +identify suspicious client IPs in the web log sample dataset. + +We transform the data such that the new index contains the sum of bytes and the +number of distinct URLs, agents, incoming requests by location, and geographic +destinations for each client IP. We also use a scripted field to count the +specific types of HTTP responses that each client IP receives. Ultimately, the +example below transforms web log data into an entity centric index where the +entity is `clientip`. + +[source,console] +---------------------------------- +POST _data_frame/transforms/_preview +{ + "source": { + "index": "kibana_sample_data_logs", + "query": { <1> + "range" : { + "timestamp" : { + "gte" : "now-30d/d" + } + } + } + }, + "dest" : { <2> + "index" : "sample_weblogs_by_clientip" + }, + "pivot": { + "group_by": { <3> + "clientip": { "terms": { "field": "clientip" } } + }, + "aggregations": { + "url_dc": { "cardinality": { "field": "url.keyword" }}, + "bytes_sum": { "sum": { "field": "bytes" }}, + "geo.src_dc": { "cardinality": { "field": "geo.src" }}, + "agent_dc": { "cardinality": { "field": "agent.keyword" }}, + "geo.dest_dc": { "cardinality": { "field": "geo.dest" }}, + "responses.total": { "value_count": { "field": "timestamp" }}, + "responses.counts": { <4> + "scripted_metric": { + "init_script": "state.responses = ['error':0L,'success':0L,'other':0L]", + "map_script": """ + def code = doc['response.keyword'].value; + if (code.startsWith('5') || code.startsWith('4')) { + state.responses.error += 1 ; + } else if(code.startsWith('2')) { + state.responses.success += 1; + } else { + state.responses.other += 1; + } + """, + "combine_script": "state.responses", + "reduce_script": """ + def counts = ['error': 0L, 'success': 0L, 'other': 0L]; + for (responses in states) { + counts.error += responses['error']; + counts.success += responses['success']; + counts.other += responses['other']; + } + return counts; + """ + } + }, + "timestamp.min": { "min": { "field": "timestamp" }}, + "timestamp.max": { "max": { "field": "timestamp" }}, + "timestamp.duration_ms": { <5> + "bucket_script": { + "buckets_path": { + "min_time": "timestamp.min.value", + "max_time": "timestamp.max.value" + }, + "script": "(params.max_time - params.min_time)" + } + } + } + } +} +---------------------------------- +// TEST[skip:setup kibana sample data] + +<1> This range query limits the {transform} to documents that are within the last +30 days at the point in time the {transform} checkpoint is processed. 
+For batch {dataframes} this occurs once. +<2> This is the destination index for the {dataframe}. It is ignored by +`_preview`. +<3> The data is grouped by the `clientip` field. +<4> This `scripted_metric` performs a distributed operation on the web log data +to count specific types of HTTP responses (error, success, and other). +<5> This `bucket_script` calculates the duration of the `clientip` access based +on the results of the aggregation. + +The preview shows you that the new index would contain data like this for each +client IP: + +[source,js] +---------------------------------- +{ + "preview" : [ + { + "geo" : { + "src_dc" : 12.0, + "dest_dc" : 9.0 + }, + "clientip" : "0.72.176.46", + "agent_dc" : 3.0, + "responses" : { + "total" : 14.0, + "counts" : { + "other" : 0, + "success" : 14, + "error" : 0 + } + }, + "bytes_sum" : 74808.0, + "timestamp" : { + "duration_ms" : 4.919943239E9, + "min" : "2019-06-17T07:51:57.333Z", + "max" : "2019-08-13T06:31:00.572Z" + }, + "url_dc" : 11.0 + }, + ... + } +---------------------------------- +// NOTCONSOLE + +This {dataframe} makes it easier to answer questions such as: + +* Which client IPs are transferring the most amounts of data? + +* Which client IPs are interacting with a high number of different URLs? + +* Which client IPs have high error rates? + +* Which client IPs are interacting with a high number of destination countries? \ No newline at end of file diff --git a/docs/reference/transform/ecommerce-example.asciidoc b/docs/reference/transform/ecommerce-example.asciidoc new file mode 100644 index 00000000000..7f8267baa16 --- /dev/null +++ b/docs/reference/transform/ecommerce-example.asciidoc @@ -0,0 +1,260 @@ +[role="xpack"] +[testenv="basic"] +[[ecommerce-dataframes]] +=== Transforming the eCommerce sample data + +beta[] + +<> enable you to retrieve information +from an {es} index, transform it, and store it in another index. Let's use the +{kibana-ref}/add-sample-data.html[{kib} sample data] to demonstrate how you can +pivot and summarize your data with {transforms}. + + +. If the {es} {security-features} are enabled, obtain a user ID with sufficient +privileges to complete these steps. ++ +-- +You need `manage_data_frame_transforms` cluster privileges to preview and create +{transforms}. Members of the built-in `data_frame_transforms_admin` +role have these privileges. + +You also need `read` and `view_index_metadata` index privileges on the source +index and `read`, `create_index`, and `index` privileges on the destination +index. + +For more information, see <> and <>. +-- + +. Choose your _source index_. ++ +-- +In this example, we'll use the eCommerce orders sample data. If you're not +already familiar with the `kibana_sample_data_ecommerce` index, use the +*Revenue* dashboard in {kib} to explore the data. Consider what insights you +might want to derive from this eCommerce data. +-- + +. Play with various options for grouping and aggregating the data. ++ +-- +_Pivoting_ your data involves using at least one field to group it and applying +at least one aggregation. You can preview what the transformed data will look +like, so go ahead and play with it! + +For example, you might want to group the data by product ID and calculate the +total number of sales for each product and its average price. Alternatively, you +might want to look at the behavior of individual customers and calculate how +much each customer spent in total and how many different categories of products +they purchased. 
Or you might want to take the currencies or geographies into +consideration. What are the most interesting ways you can transform and +interpret this data? + +Go to *Machine Learning* > *Data Frames* in {kib} and use the +wizard to create a {transform}: + +[role="screenshot"] +image::images/ecommerce-pivot1.jpg["Creating a simple {transform} in {kib}"] + +In this case, we grouped the data by customer ID and calculated the sum of +products each customer purchased. + +Let's add some more aggregations to learn more about our customers' orders. For +example, let's calculate the total sum of their purchases, the maximum number of +products that they purchased in a single order, and their total number of orders. +We'll accomplish this by using the +{ref}/search-aggregations-metrics-sum-aggregation.html[`sum` aggregation] on the +`taxless_total_price` field, the +{ref}/search-aggregations-metrics-max-aggregation.html[`max` aggregation] on the +`total_quantity` field, and the +{ref}/search-aggregations-metrics-cardinality-aggregation.html[`cardinality` aggregation] +on the `order_id` field: + +[role="screenshot"] +image::images/ecommerce-pivot2.jpg["Adding multiple aggregations to a {transform} in {kib}"] + +TIP: If you're interested in a subset of the data, you can optionally include a +{ref}/search-request-body.html#request-body-search-query[query] element. In this +example, we've filtered the data so that we're only looking at orders with a +`currency` of `EUR`. Alternatively, we could group the data by that field too. +If you want to use more complex queries, you can create your {dataframe} from a +{kibana-ref}/save-open-search.html[saved search]. + +If you prefer, you can use the +{ref}/preview-transform.html[preview {transforms} API]: + +[source,console] +-------------------------------------------------- +POST _data_frame/transforms/_preview +{ + "source": { + "index": "kibana_sample_data_ecommerce", + "query": { + "bool": { + "filter": { + "term": {"currency": "EUR"} + } + } + } + }, + "pivot": { + "group_by": { + "customer_id": { + "terms": { + "field": "customer_id" + } + } + }, + "aggregations": { + "total_quantity.sum": { + "sum": { + "field": "total_quantity" + } + }, + "taxless_total_price.sum": { + "sum": { + "field": "taxless_total_price" + } + }, + "total_quantity.max": { + "max": { + "field": "total_quantity" + } + }, + "order_id.cardinality": { + "cardinality": { + "field": "order_id" + } + } + } + } +} +-------------------------------------------------- +// TEST[skip:set up sample data] +-- + +. When you are satisfied with what you see in the preview, create the +{transform}. ++ +-- +.. Supply a job ID and the name of the target (or _destination_) index. If the +target index does not exist, it will be created automatically. + +.. Decide whether you want the {transform} to run once or continuously. +-- ++ +-- +Since this sample data index is unchanging, let's use the default behavior and +just run the {transform} once. + +[role="screenshot"] +image::images/ecommerce-batch.jpg["Specifying the {transform} options in {kib}"] + +If you want to try it out, however, go ahead and click on *Continuous mode*. +You must choose a field that the {transform} can use to check which +entities have changed. In general, it's a good idea to use the ingest timestamp +field. In this example, however, you can use the `order_date` field. + +If you prefer, you can use the +{ref}/put-transform.html[create {transforms} API]. 
For +example: + +[source,console] +-------------------------------------------------- +PUT _data_frame/transforms/ecommerce-customer-transform +{ + "source": { + "index": [ + "kibana_sample_data_ecommerce" + ], + "query": { + "bool": { + "filter": { + "term": { + "currency": "EUR" + } + } + } + } + }, + "pivot": { + "group_by": { + "customer_id": { + "terms": { + "field": "customer_id" + } + } + }, + "aggregations": { + "total_quantity.sum": { + "sum": { + "field": "total_quantity" + } + }, + "taxless_total_price.sum": { + "sum": { + "field": "taxless_total_price" + } + }, + "total_quantity.max": { + "max": { + "field": "total_quantity" + } + }, + "order_id.cardinality": { + "cardinality": { + "field": "order_id" + } + } + } + }, + "dest": { + "index": "ecommerce-customers" + } +} +-------------------------------------------------- +// TEST[skip:setup kibana sample data] +-- + +. Start the {transform}. ++ +-- + +TIP: Even though resource utilization is automatically adjusted based on the +cluster load, a {transform} increases search and indexing load on your +cluster while it runs. If you're experiencing an excessive load, however, you +can stop it. + +You can start, stop, and manage {transforms} in {kib}: + +[role="screenshot"] +image::images/dataframe-transforms.jpg["Managing {transforms} in {kib}"] + +Alternatively, you can use the +{ref}/start-transform.html[start {transforms}] and +{ref}/stop-transform.html[stop {transforms}] APIs. For +example: + +[source,console] +-------------------------------------------------- +POST _data_frame/transforms/ecommerce-customer-transform/_start +-------------------------------------------------- +// TEST[skip:setup kibana sample data] + +-- + +. Explore the data in your new index. ++ +-- +For example, use the *Discover* application in {kib}: + +[role="screenshot"] +image::images/ecommerce-results.jpg["Exploring the new index in {kib}"] + +-- + +TIP: If you do not want to keep the {transform}, you can delete it in +{kib} or use the +{ref}/delete-transform.html[delete {transform} API]. When +you delete a {transform}, its destination index and {kib} index +patterns remain. 
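For reference, a hedged sketch of cleaning up the {transform} created in this example with the APIs (rather than {kib}) could look like the following; the {transform} generally needs to be stopped before it can be deleted:

[source,console]
--------------------------------------------------
POST _data_frame/transforms/ecommerce-customer-transform/_stop?wait_for_completion=true

DELETE _data_frame/transforms/ecommerce-customer-transform
--------------------------------------------------
// TEST[skip:hypothetical example]

As noted above, this removes only the {transform} itself; the `ecommerce-customers` destination index and any {kib} index pattern stay in place until you delete them separately.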
diff --git a/docs/reference/transform/images/dataframe-transforms.jpg b/docs/reference/transform/images/dataframe-transforms.jpg new file mode 100644 index 00000000000..927678f894d Binary files /dev/null and b/docs/reference/transform/images/dataframe-transforms.jpg differ diff --git a/docs/reference/transform/images/ecommerce-batch.jpg b/docs/reference/transform/images/ecommerce-batch.jpg new file mode 100644 index 00000000000..bed3fedd4cf Binary files /dev/null and b/docs/reference/transform/images/ecommerce-batch.jpg differ diff --git a/docs/reference/transform/images/ecommerce-continuous.jpg b/docs/reference/transform/images/ecommerce-continuous.jpg new file mode 100644 index 00000000000..f144fc8cb95 Binary files /dev/null and b/docs/reference/transform/images/ecommerce-continuous.jpg differ diff --git a/docs/reference/transform/images/ecommerce-pivot1.jpg b/docs/reference/transform/images/ecommerce-pivot1.jpg new file mode 100644 index 00000000000..b55b88b8acf Binary files /dev/null and b/docs/reference/transform/images/ecommerce-pivot1.jpg differ diff --git a/docs/reference/transform/images/ecommerce-pivot2.jpg b/docs/reference/transform/images/ecommerce-pivot2.jpg new file mode 100644 index 00000000000..9af5a3c46b7 Binary files /dev/null and b/docs/reference/transform/images/ecommerce-pivot2.jpg differ diff --git a/docs/reference/transform/images/ecommerce-results.jpg b/docs/reference/transform/images/ecommerce-results.jpg new file mode 100644 index 00000000000..f483c3b3c36 Binary files /dev/null and b/docs/reference/transform/images/ecommerce-results.jpg differ diff --git a/docs/reference/transform/images/ml-dataframepivot.jpg b/docs/reference/transform/images/ml-dataframepivot.jpg new file mode 100644 index 00000000000..c0c7946cf44 Binary files /dev/null and b/docs/reference/transform/images/ml-dataframepivot.jpg differ diff --git a/docs/reference/transform/index.asciidoc b/docs/reference/transform/index.asciidoc new file mode 100644 index 00000000000..41ffd97ee39 --- /dev/null +++ b/docs/reference/transform/index.asciidoc @@ -0,0 +1,27 @@ +[role="xpack"] +[[ml-dataframes]] += Transforming data + +[partintro] +-- + +{transforms-cap} enable you to convert existing {es} indices into summarized +indices, which provide opportunities for new insights and analytics. For example, +you can use {transforms} to pivot your data into entity-centric indices that +summarize the behavior of users or sessions or other entities in your data. 
+ +* <> +* <> +* <> +* <> +* <> +* <> +-- + +include::overview.asciidoc[] +include::usage.asciidoc[] +include::checkpoints.asciidoc[] +include::api-quickref.asciidoc[] +include::dataframe-examples.asciidoc[] +include::troubleshooting.asciidoc[] +include::limitations.asciidoc[] \ No newline at end of file diff --git a/docs/reference/transform/limitations.asciidoc b/docs/reference/transform/limitations.asciidoc new file mode 100644 index 00000000000..a97737464b3 --- /dev/null +++ b/docs/reference/transform/limitations.asciidoc @@ -0,0 +1,216 @@ +[role="xpack"] +[[dataframe-limitations]] +== {transform-cap} limitations +[subs="attributes"] +++++ +Limitations +++++ + +beta[] + +The following limitations and known problems apply to the 7.4 release of +the Elastic {dataframe} feature: + +[float] +[[df-compatibility-limitations]] +=== Beta {transforms} do not have guaranteed backwards or forwards compatibility + +Whilst {transforms} are beta, it is not guaranteed that a +{transform} created in a previous version of the {stack} will be able +to start and operate in a future version. Neither can support be provided for +{transform} tasks to be able to operate in a cluster with mixed node +versions. +Please note that the output of a {transform} is persisted to a +destination index. This is a normal {es} index and is not affected by the beta +status. + +[float] +[[df-ui-limitation]] +=== {dataframe-cap} UI will not work during a rolling upgrade from 7.2 + +If your cluster contains mixed version nodes, for example during a rolling +upgrade from 7.2 to a newer version, and {transforms} have been +created in 7.2, the {dataframe} UI will not work. Please wait until all nodes +have been upgraded to the newer version before using the {dataframe} UI. + + +[float] +[[df-datatype-limitations]] +=== {dataframe-cap} data type limitation + +{dataframes-cap} do not (yet) support fields containing arrays – in the UI or +the API. If you try to create one, the UI will fail to show the source index +table. + +[float] +[[df-ccs-limitations]] +=== {ccs-cap} is not supported + +{ccs-cap} is not supported for {transforms}. + +[float] +[[df-kibana-limitations]] +=== Up to 1,000 {transforms} are supported + +A single cluster will support up to 1,000 {transforms}. +When using the +{ref}/get-transform.html[GET {transforms} API] a total +`count` of {transforms} is returned. Use the `size` and `from` parameters to +enumerate through the full list. + +[float] +[[df-aggresponse-limitations]] +=== Aggregation responses may be incompatible with destination index mappings + +When a {transform} is first started, it will deduce the mappings +required for the destination index. This process is based on the field types of +the source index and the aggregations used. If the fields are derived from +{ref}/search-aggregations-metrics-scripted-metric-aggregation.html[`scripted_metrics`] +or {ref}/search-aggregations-pipeline-bucket-script-aggregation.html[`bucket_scripts`], +{ref}/dynamic-mapping.html[dynamic mappings] will be used. In some instances the +deduced mappings may be incompatible with the actual data. For example, numeric +overflows might occur or dynamically mapped fields might contain both numbers +and strings. Please check {es} logs if you think this may have occurred. As a +workaround, you may define custom mappings prior to starting the +{transform}. For example, +{ref}/indices-create-index.html[create a custom destination index] or +{ref}/indices-templates.html[define an index template]. 
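As a hedged sketch of the index template workaround, you might pre-define mappings for the script-derived fields before the {transform} first runs. The template name, index pattern, and field names below are hypothetical and loosely based on the web log example:

[source,console]
--------------------------------------------------
PUT _template/sample_weblogs_by_clientip_template
{
  "index_patterns": ["sample_weblogs_by_clientip*"],
  "mappings": {
    "properties": {
      "responses": {
        "properties": {
          "counts": {
            "properties": {
              "error":   { "type": "long" },
              "success": { "type": "long" },
              "other":   { "type": "long" }
            }
          }
        }
      }
    }
  }
}
--------------------------------------------------
// TEST[skip:hypothetical example]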
+ +[float] +[[df-batch-limitations]] +=== Batch {transforms} may not account for changed documents + +A batch {transform} uses a +{ref}/search-aggregations-bucket-composite-aggregation.html[composite aggregation] +which allows efficient pagination through all buckets. Composite aggregations +do not yet support a search context, therefore if the source data is changed +(deleted, updated, added) while the batch {dataframe} is in progress, then the +results may not include these changes. + +[float] +[[df-consistency-limitations]] +=== {cdataframe-cap} consistency does not account for deleted or updated documents + +While the process for {transforms} allows the continual recalculation +of the {transform} as new data is being ingested, it does also have +some limitations. + +Changed entities will only be identified if their time field +has also been updated and falls within the range of the action to check for +changes. This has been designed in principle for, and is suited to, the use case +where new data is given a timestamp for the time of ingest. + +If the indices that fall within the scope of the source index pattern are +removed, for example when deleting historical time-based indices, then the +composite aggregation performed in consecutive checkpoint processing will search +over different source data, and entities that only existed in the deleted index +will not be removed from the {dataframe} destination index. + +Depending on your use case, you may wish to recreate the {transform} +entirely after deletions. Alternatively, if your use case is tolerant to +historical archiving, you may wish to include a max ingest timestamp in your +aggregation. This will allow you to exclude results that have not been recently +updated when viewing the {dataframe} destination index. + + +[float] +[[df-deletion-limitations]] +=== Deleting a {transform} does not delete the {dataframe} destination index or {kib} index pattern + +When deleting a {transform} using `DELETE _data_frame/transforms/index` +neither the {dataframe} destination index nor the {kib} index pattern, should +one have been created, are deleted. These objects must be deleted separately. + +[float] +[[df-aggregation-page-limitations]] +=== Handling dynamic adjustment of aggregation page size + +During the development of {transforms}, control was favoured over +performance. In the design considerations, it is preferred for the +{transform} to take longer to complete quietly in the background +rather than to finish quickly and take precedence in resource consumption. + +Composite aggregations are well suited for high cardinality data enabling +pagination through results. If a {ref}/circuit-breaker.html[circuit breaker] +memory exception occurs when performing the composite aggregated search then we +try again reducing the number of buckets requested. This circuit breaker is +calculated based upon all activity within the cluster, not just activity from +{transforms}, so it therefore may only be a temporary resource +availability issue. + +For a batch {transform}, the number of buckets requested is only ever +adjusted downwards. The lowering of value may result in a longer duration for the +{transform} checkpoint to complete. For {cdataframes}, the number of +buckets requested is reset back to its default at the start of every checkpoint +and it is possible for circuit breaker exceptions to occur repeatedly in the +{es} logs. + +The {transform} retrieves data in batches which means it calculates +several buckets at once. 
Per default this is 500 buckets per search/index +operation. The default can be changed using `max_page_search_size` and the +minimum value is 10. If failures still occur once the number of buckets +requested has been reduced to its minimum, then the {transform} will +be set to a failed state. + +[float] +[[df-dynamic-adjustments-limitations]] +=== Handling dynamic adjustments for many terms + +For each checkpoint, entities are identified that have changed since the last +time the check was performed. This list of changed entities is supplied as a +{ref}/query-dsl-terms-query.html[terms query] to the {transform} +composite aggregation, one page at a time. Then updates are applied to the +destination index for each page of entities. + +The page `size` is defined by `max_page_search_size` which is also used to +define the number of buckets returned by the composite aggregation search. The +default value is 500, the minimum is 10. + +The index setting +{ref}/index-modules.html#dynamic-index-settings[`index.max_terms_count`] defines +the maximum number of terms that can be used in a terms query. The default value +is 65536. If `max_page_search_size` exceeds `index.max_terms_count` the +{transform} will fail. + +Using smaller values for `max_page_search_size` may result in a longer duration +for the {transform} checkpoint to complete. + +[float] +[[df-scheduling-limitations]] +=== {cdataframe-cap} scheduling limitations + +A {cdataframe} periodically checks for changes to source data. The functionality +of the scheduler is currently limited to a basic periodic timer which can be +within the `frequency` range from 1s to 1h. The default is 1m. This is designed +to run little and often. When choosing a `frequency` for this timer consider +your ingest rate along with the impact that the {transform} +search/index operations has other users in your cluster. Also note that retries +occur at `frequency` interval. + +[float] +[[df-failed-limitations]] +=== Handling of failed {transforms} + +Failed {transforms} remain as a persistent task and should be handled +appropriately, either by deleting it or by resolving the root cause of the +failure and re-starting. + +When using the API to delete a failed {transform}, first stop it using +`_stop?force=true`, then delete it. + +[float] +[[df-availability-limitations]] +=== {cdataframes-cap} may give incorrect results if documents are not yet available to search + +After a document is indexed, there is a very small delay until it is available +to search. + +A {ctransform} periodically checks for changed entities between the +time since it last checked and `now` minus `sync.time.delay`. This time window +moves without overlapping. If the timestamp of a recently indexed document falls +within this time window but this document is not yet available to search then +this entity will not be updated. + +If using a `sync.time.field` that represents the data ingest time and using a +zero second or very small `sync.time.delay`, then it is more likely that this +issue will occur. \ No newline at end of file diff --git a/docs/reference/transform/overview.asciidoc b/docs/reference/transform/overview.asciidoc new file mode 100644 index 00000000000..fa161f2e9ea --- /dev/null +++ b/docs/reference/transform/overview.asciidoc @@ -0,0 +1,71 @@ +[role="xpack"] +[[ml-transform-overview]] +== {transform-cap} overview +++++ +Overview +++++ + +beta[] + +A _{dataframe}_ is a two-dimensional tabular data structure. 
In the context of +the {stack}, it is a transformation of data that is indexed in {es}. For +example, you can use {dataframes} to _pivot_ your data into a new entity-centric +index. By transforming and summarizing your data, it becomes possible to +visualize and analyze it in alternative and interesting ways. + +A lot of {es} indices are organized as a stream of events: each event is an +individual document, for example a single item purchase. {dataframes-cap} enable +you to summarize this data, bringing it into an organized, more +analysis-friendly format. For example, you can summarize all the purchases of a +single customer. + +You can create {dataframes} by using {transforms}. +{transforms-cap} enable you to define a pivot, which is a set of +features that transform the index into a different, more digestible format. +Pivoting results in a summary of your data, which is the {dataframe}. + +To define a pivot, first you select one or more fields that you will use to +group your data. You can select categorical fields (terms) and numerical fields +for grouping. If you use numerical fields, the field values are bucketed using +an interval that you specify. + +The second step is deciding how you want to aggregate the grouped data. When +using aggregations, you practically ask questions about the index. There are +different types of aggregations, each with its own purpose and output. To learn +more about the supported aggregations and group-by fields, see +{ref}/transform-resource.html[{transform-cap} resources]. + +As an optional step, you can also add a query to further limit the scope of the +aggregation. + +The {transform} performs a composite aggregation that +paginates through all the data defined by the source index query. The output of +the aggregation is stored in a destination index. Each time the +{transform} queries the source index, it creates a _checkpoint_. You +can decide whether you want the {transform} to run once (batch +{transform}) or continuously ({transform}). A batch +{transform} is a single operation that has a single checkpoint. +{ctransforms-cap} continually increment and process checkpoints as new +source data is ingested. + +.Example + +Imagine that you run a webshop that sells clothes. Every order creates a document +that contains a unique order ID, the name and the category of the ordered product, +its price, the ordered quantity, the exact date of the order, and some customer +information (name, gender, location, etc). Your dataset contains all the transactions +from last year. + +If you want to check the sales in the different categories in your last fiscal +year, define a {transform} that groups the data by the product +categories (women's shoes, men's clothing, etc.) and the order date. Use the +last year as the interval for the order date. Then add a sum aggregation on the +ordered quantity. The result is a {dataframe} that shows the number of sold +items in every product category in the last year. + +[role="screenshot"] +image::images/ml-dataframepivot.jpg["Example of a data frame pivot in {kib}"] + +IMPORTANT: The {transform} leaves your source index intact. It +creates a new index that is dedicated to the {dataframe}. 
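To make the example above more tangible, here is a hedged sketch of what such a pivot could look like as a preview request against the eCommerce sample data. The field names (`category.keyword`, `order_date`, `total_quantity`) come from that sample dataset, and the `dest` index name is hypothetical (it is ignored by `_preview`); adapt all of them to your own webshop data:

[source,console]
--------------------------------------------------
POST _data_frame/transforms/_preview
{
  "source": {
    "index": "kibana_sample_data_ecommerce"
  },
  "dest": {
    "index": "ecommerce-sales-by-category"
  },
  "pivot": {
    "group_by": {
      "category":   { "terms": { "field": "category.keyword" } },
      "order_date": { "date_histogram": { "field": "order_date", "calendar_interval": "1y" } }
    },
    "aggregations": {
      "total_quantity.sum": { "sum": { "field": "total_quantity" } }
    }
  }
}
--------------------------------------------------
// TEST[skip:hypothetical example]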
+ diff --git a/docs/reference/transform/troubleshooting.asciidoc b/docs/reference/transform/troubleshooting.asciidoc new file mode 100644 index 00000000000..9d76e93415d --- /dev/null +++ b/docs/reference/transform/troubleshooting.asciidoc @@ -0,0 +1,31 @@ +[role="xpack"] +[testenv="basic"] +[[dataframe-troubleshooting]] +== Troubleshooting {transforms} +[subs="attributes"] +++++ +Troubleshooting +++++ + +Use the information in this section to troubleshoot common problems. + +include::{stack-repo-dir}/help.asciidoc[tag=get-help] + +If you encounter problems with your {transforms}, you can gather more +information from the following files and APIs: + +* Lightweight audit messages are stored in `.data-frame-notifications-*`. Search +by your `transform_id`. +* The +{ref}/get-transform-stats.html[get {transform} statistics API] +provides information about the {transform} status and failures. +* If the {transform} exists as a task, you can use the +{ref}/tasks.html[task management API] to gather task information. For example: +`GET _tasks?actions=data_frame/transforms*&detailed`. Typically, the task exists +when the {transform} is in a started or failed state. +* The {es} logs from the node that was running the {transform} might +also contain useful information. You can identify the node from the notification +messages. Alternatively, if the task still exists, you can get that information +from the get {transform} statistics API. For more information, see +{ref}/logging.html[Logging configuration]. + diff --git a/docs/reference/transform/usage.asciidoc b/docs/reference/transform/usage.asciidoc new file mode 100644 index 00000000000..70dfe0f80b3 --- /dev/null +++ b/docs/reference/transform/usage.asciidoc @@ -0,0 +1,56 @@ +[role="xpack"] +[testenv="basic"] +[[ml-transforms-usage]] +== When to use {transforms} + +{es} aggregations are a powerful and flexible feature that enable you to +summarize and retrieve complex insights about your data. You can summarize +complex things like the number of web requests per day on a busy website, broken +down by geography and browser type. If you use the same data set to try to +calculate something as simple as a single number for the average duration of +visitor web sessions, however, you can quickly run out of memory. + +Why does this occur? A web session duration is an example of a behavioral +attribute not held on any one log record; it has to be derived by finding the +first and last records for each session in our weblogs. This derivation requires +some complex query expressions and a lot of memory to connect all the data +points. If you have an ongoing background process that fuses related events from +one index into entity-centric summaries in another index, you get a more useful, +joined-up picture. This new index is sometimes referred to as a _{dataframe}_. + +You might want to consider using {transforms} instead of aggregations when: + +* You need a complete _feature index_ rather than a top-N set of items. ++ +In {ml}, you often need a complete set of behavioral features rather just the +top-N. For example, if you are predicting customer churn, you might look at +features such as the number of website visits in the last week, the total number +of sales, or the number of emails sent. The {stack} {ml-features} create models +based on this multi-dimensional feature space, so they benefit from the full +feature indices that are created by {transforms}. 
++ +This scenario also applies when you are trying to search across the results of +an aggregation or multiple aggregations. Aggregation results can be ordered or +filtered, but there are +{ref}/search-aggregations-bucket-terms-aggregation.html#search-aggregations-bucket-terms-aggregation-order[limitations to ordering] +and +{ref}/search-aggregations-pipeline-bucket-selector-aggregation.html[filtering by bucket selector] +is constrained by the maximum number of buckets returned. If you want to search +all aggregation results, you need to create the complete {dataframe}. If you +need to sort or filter the aggregation results by multiple fields, {transforms} +are particularly useful. + +* You need to sort aggregation results by a pipeline aggregation. ++ +{ref}/search-aggregations-pipeline.html[Pipeline aggregations] cannot be used +for sorting. Technically, this is because pipeline aggregations are run during +the reduce phase after all other aggregations have already completed. If you +create a {transform}, you can effectively perform multiple passes over the data. + +* You want to create summary tables to optimize queries. ++ +For example, if you +have a high level dashboard that is accessed by a large number of users and it +uses a complex aggregation over a large dataset, it may be more efficient to +create a {transform} to cache results. Thus, each user doesn't need to run the +aggregation query. diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties index 9fabbb9bd78..fe8ba436dc1 100644 --- a/gradle/wrapper/gradle-wrapper.properties +++ b/gradle/wrapper/gradle-wrapper.properties @@ -1,6 +1,6 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionUrl=https\://services.gradle.org/distributions/gradle-5.5-all.zip +distributionUrl=https\://services.gradle.org/distributions/gradle-5.6.2-all.zip zipStoreBase=GRADLE_USER_HOME zipStorePath=wrapper/dists -distributionSha256Sum=302b7df46730ce75c582542c056c9bf5cac2b94fbf2cc656d0e37e41e8a5d371 +distributionSha256Sum=027fdd265d277bae65a0d349b6b8da02135b0b8e14ba891e26281fa877fe37a2 diff --git a/gradlew b/gradlew index 8e25e6c19d5..83f2acfdc31 100755 --- a/gradlew +++ b/gradlew @@ -125,8 +125,8 @@ if $darwin; then GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\"" fi -# For Cygwin, switch paths to Windows format before running java -if $cygwin ; then +# For Cygwin or MSYS, switch paths to Windows format before running java +if [ "$cygwin" = "true" -o "$msys" = "true" ] ; then APP_HOME=`cygpath --path --mixed "$APP_HOME"` CLASSPATH=`cygpath --path --mixed "$CLASSPATH"` JAVACMD=`cygpath --unix "$JAVACMD"` diff --git a/libs/core/src/main/java/org/elasticsearch/common/unit/TimeValue.java b/libs/core/src/main/java/org/elasticsearch/common/unit/TimeValue.java index c208e7d7953..edca86637e1 100644 --- a/libs/core/src/main/java/org/elasticsearch/common/unit/TimeValue.java +++ b/libs/core/src/main/java/org/elasticsearch/common/unit/TimeValue.java @@ -71,6 +71,14 @@ public class TimeValue implements Comparable { return new TimeValue(hours, TimeUnit.HOURS); } + public static TimeValue timeValueDays(long days) { + // 106751.9 days is Long.MAX_VALUE nanoseconds, so we cannot store 106752 days + if (days > 106751) { + throw new IllegalArgumentException("time value cannot store values greater than 106751 days"); + } + return new TimeValue(days, TimeUnit.DAYS); + } + /** * @return the unit used for the this time value, see {@link #duration()} */ diff 
--git a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsAggregationBuilder.java b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsAggregationBuilder.java index 90f51236252..7385ba4dc15 100644 --- a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsAggregationBuilder.java +++ b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsAggregationBuilder.java @@ -22,6 +22,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.MultiValueMode; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories; @@ -32,7 +33,6 @@ import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric; import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import org.elasticsearch.search.aggregations.support.ValuesSourceType; -import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; import java.util.Map; @@ -80,9 +80,11 @@ public class MatrixStatsAggregationBuilder } @Override - protected MatrixStatsAggregatorFactory innerBuild(SearchContext context, Map> configs, - AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder) throws IOException { - return new MatrixStatsAggregatorFactory(name, configs, multiValueMode, context, parent, subFactoriesBuilder, metaData); + protected MatrixStatsAggregatorFactory innerBuild(QueryShardContext queryShardContext, + Map> configs, + AggregatorFactory parent, + AggregatorFactories.Builder subFactoriesBuilder) throws IOException { + return new MatrixStatsAggregatorFactory(name, configs, multiValueMode, queryShardContext, parent, subFactoriesBuilder, metaData); } @Override diff --git a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsAggregatorFactory.java b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsAggregatorFactory.java index 469cd0dad8f..edd5f20d009 100644 --- a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsAggregatorFactory.java +++ b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsAggregatorFactory.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.search.aggregations.matrix.stats; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.MultiValueMode; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; @@ -37,23 +38,32 @@ final class MatrixStatsAggregatorFactory extends ArrayValuesSourceAggregatorFact private final MultiValueMode multiValueMode; MatrixStatsAggregatorFactory(String name, - Map> configs, MultiValueMode multiValueMode, - SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, - Map metaData) throws IOException { - super(name, configs, context, parent, subFactoriesBuilder, metaData); + Map> configs, + MultiValueMode multiValueMode, + 
QueryShardContext queryShardContext, + AggregatorFactory parent, + AggregatorFactories.Builder subFactoriesBuilder, + Map metaData) throws IOException { + super(name, configs, queryShardContext, parent, subFactoriesBuilder, metaData); this.multiValueMode = multiValueMode; } @Override - protected Aggregator createUnmapped(Aggregator parent, List pipelineAggregators, Map metaData) + protected Aggregator createUnmapped(SearchContext searchContext, + Aggregator parent, + List pipelineAggregators, + Map metaData) throws IOException { - return new MatrixStatsAggregator(name, null, context, parent, multiValueMode, pipelineAggregators, metaData); + return new MatrixStatsAggregator(name, null, searchContext, parent, multiValueMode, pipelineAggregators, metaData); } @Override - protected Aggregator doCreateInternal(Map valuesSources, Aggregator parent, - boolean collectsFromSingleBucket, List pipelineAggregators, - Map metaData) throws IOException { - return new MatrixStatsAggregator(name, valuesSources, context, parent, multiValueMode, pipelineAggregators, metaData); + protected Aggregator doCreateInternal(Map valuesSources, + SearchContext searchContext, + Aggregator parent, + boolean collectsFromSingleBucket, + List pipelineAggregators, + Map metaData) throws IOException { + return new MatrixStatsAggregator(name, valuesSources, searchContext, parent, multiValueMode, pipelineAggregators, metaData); } } diff --git a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/support/ArrayValuesSourceAggregationBuilder.java b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/support/ArrayValuesSourceAggregationBuilder.java index a3d7fc5d642..3d4c786440f 100644 --- a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/support/ArrayValuesSourceAggregationBuilder.java +++ b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/support/ArrayValuesSourceAggregationBuilder.java @@ -27,6 +27,7 @@ import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.IndexGeoPointFieldData; import org.elasticsearch.index.fielddata.IndexNumericFieldData; import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.script.Script; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.AbstractAggregationBuilder; @@ -34,7 +35,6 @@ import org.elasticsearch.search.aggregations.AggregationInitializationException; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; -import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; import java.util.ArrayList; @@ -239,28 +239,28 @@ public abstract class ArrayValuesSourceAggregationBuilder doBuild(SearchContext context, AggregatorFactory parent, - AggregatorFactories.Builder subFactoriesBuilder) throws IOException { - Map> configs = resolveConfig(context); - ArrayValuesSourceAggregatorFactory factory = innerBuild(context, configs, parent, subFactoriesBuilder); + protected final ArrayValuesSourceAggregatorFactory doBuild(QueryShardContext queryShardContext, AggregatorFactory parent, + Builder subFactoriesBuilder) throws IOException { + Map> configs = resolveConfig(queryShardContext); + ArrayValuesSourceAggregatorFactory factory = innerBuild(queryShardContext, configs, 
parent, subFactoriesBuilder); return factory; } - protected Map> resolveConfig(SearchContext context) { + protected Map> resolveConfig(QueryShardContext queryShardContext) { HashMap> configs = new HashMap<>(); for (String field : fields) { - ValuesSourceConfig config = config(context, field, null); + ValuesSourceConfig config = config(queryShardContext, field, null); configs.put(field, config); } return configs; } - protected abstract ArrayValuesSourceAggregatorFactory innerBuild(SearchContext context, + protected abstract ArrayValuesSourceAggregatorFactory innerBuild(QueryShardContext queryShardContext, Map> configs, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder) throws IOException; - public ValuesSourceConfig config(SearchContext context, String field, Script script) { + public ValuesSourceConfig config(QueryShardContext queryShardContext, String field, Script script) { ValueType valueType = this.valueType != null ? this.valueType : targetValueType; @@ -282,7 +282,7 @@ public abstract class ArrayValuesSourceAggregationBuilder config = new ValuesSourceConfig<>(valuesSourceType); @@ -291,7 +291,7 @@ public abstract class ArrayValuesSourceAggregationBuilder indexFieldData = context.getForField(fieldType); + IndexFieldData indexFieldData = queryShardContext.getForField(fieldType); ValuesSourceConfig config; if (valuesSourceType == ValuesSourceType.ANY) { diff --git a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/support/ArrayValuesSourceAggregatorFactory.java b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/support/ArrayValuesSourceAggregatorFactory.java index 754cb9576c7..a21c4dbd366 100644 --- a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/support/ArrayValuesSourceAggregatorFactory.java +++ b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/support/ArrayValuesSourceAggregatorFactory.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.aggregations.support; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; @@ -36,35 +37,44 @@ public abstract class ArrayValuesSourceAggregatorFactory> configs; public ArrayValuesSourceAggregatorFactory(String name, Map> configs, - SearchContext context, AggregatorFactory parent, + QueryShardContext queryShardContext, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { - super(name, context, parent, subFactoriesBuilder, metaData); + super(name, queryShardContext, parent, subFactoriesBuilder, metaData); this.configs = configs; } @Override - public Aggregator createInternal(Aggregator parent, boolean collectsFromSingleBucket, List pipelineAggregators, - Map metaData) throws IOException { + public Aggregator createInternal(SearchContext searchContext, + Aggregator parent, + boolean collectsFromSingleBucket, + List pipelineAggregators, + Map metaData) throws IOException { HashMap valuesSources = new HashMap<>(); for (Map.Entry> config : configs.entrySet()) { - VS vs = config.getValue().toValuesSource(context.getQueryShardContext()); + VS vs = config.getValue().toValuesSource(queryShardContext); if (vs != null) { valuesSources.put(config.getKey(), vs); } } if (valuesSources.isEmpty()) { - return createUnmapped(parent, pipelineAggregators, metaData); + return 
createUnmapped(searchContext, parent, pipelineAggregators, metaData); } - return doCreateInternal(valuesSources, parent, collectsFromSingleBucket, pipelineAggregators, metaData); + return doCreateInternal(valuesSources, searchContext, parent, + collectsFromSingleBucket, pipelineAggregators, metaData); } - protected abstract Aggregator createUnmapped(Aggregator parent, List pipelineAggregators, - Map metaData) throws IOException; + protected abstract Aggregator createUnmapped(SearchContext searchContext, + Aggregator parent, + List pipelineAggregators, + Map metaData) throws IOException; - protected abstract Aggregator doCreateInternal(Map valuesSources, Aggregator parent, boolean collectsFromSingleBucket, - List pipelineAggregators, - Map metaData) throws IOException; + protected abstract Aggregator doCreateInternal(Map valuesSources, + SearchContext searchContext, + Aggregator parent, + boolean collectsFromSingleBucket, + List pipelineAggregators, + Map metaData) throws IOException; } diff --git a/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/240_required_pipeline.yml b/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/240_required_pipeline.yml new file mode 100644 index 00000000000..01553bcf40a --- /dev/null +++ b/modules/ingest-common/src/test/resources/rest-api-spec/test/ingest/240_required_pipeline.yml @@ -0,0 +1,175 @@ +--- +teardown: + - do: + ingest.delete_pipeline: + id: "my_pipeline" + ignore: 404 + +--- +"Test index with required pipeline": + - do: + ingest.put_pipeline: + id: "my_pipeline" + body: > + { + "description": "_description", + "processors": [ + { + "bytes" : { + "field" : "bytes_source_field", + "target_field" : "bytes_target_field" + } + } + ] + } + - match: { acknowledged: true } + # required pipeline via index + - do: + indices.create: + index: test + body: + settings: + index: + required_pipeline: "my_pipeline" + aliases: + test_alias: {} + + - do: + index: + index: test + id: 1 + body: {bytes_source_field: "1kb"} + + - do: + get: + index: test + id: 1 + - match: { _source.bytes_source_field: "1kb" } + - match: { _source.bytes_target_field: 1024 } + # required pipeline via alias + - do: + index: + index: test_alias + id: 2 + body: {bytes_source_field: "1kb"} + + - do: + get: + index: test + id: 2 + - match: { _source.bytes_source_field: "1kb" } + - match: { _source.bytes_target_field: 1024 } + # required pipeline via upsert + - do: + update: + index: test + id: 3 + body: + script: + source: "ctx._source.ran_script = true" + lang: "painless" + upsert: { "bytes_source_field":"1kb" } + - do: + get: + index: test + id: 3 + - match: { _source.bytes_source_field: "1kb" } + - match: { _source.bytes_target_field: 1024 } + # required pipeline via scripted upsert + - do: + update: + index: test + id: 4 + body: + script: + source: "ctx._source.bytes_source_field = '1kb'" + lang: "painless" + upsert : {} + scripted_upsert: true + - do: + get: + index: test + id: 4 + - match: { _source.bytes_source_field: "1kb" } + - match: { _source.bytes_target_field: 1024 } + # required pipeline via doc_as_upsert + - do: + update: + index: test + id: 5 + body: + doc: { "bytes_source_field":"1kb" } + doc_as_upsert: true + - do: + get: + index: test + id: 5 + - match: { _source.bytes_source_field: "1kb" } + - match: { _source.bytes_target_field: 1024 } + # required pipeline via bulk upsert + # note - bulk scripted upsert's execute the pipeline before the script, so any data referenced by the pipeline + # needs to be in the upsert, not the script + - do: 
+ bulk: + refresh: true + body: | + {"update":{"_id":"6","_index":"test"}} + {"script":"ctx._source.ran_script = true","upsert":{"bytes_source_field":"1kb"}} + {"update":{"_id":"7","_index":"test"}} + {"doc":{"bytes_source_field":"2kb"}, "doc_as_upsert":true} + {"update":{"_id":"8","_index":"test"}} + {"script": "ctx._source.ran_script = true","upsert":{"bytes_source_field":"3kb"}, "scripted_upsert" : true} + {"update":{"_id":"6_alias","_index":"test_alias"}} + {"script":"ctx._source.ran_script = true","upsert":{"bytes_source_field":"1kb"}} + {"update":{"_id":"7_alias","_index":"test_alias"}} + {"doc":{"bytes_source_field":"2kb"}, "doc_as_upsert":true} + {"update":{"_id":"8_alias","_index":"test_alias"}} + {"script": "ctx._source.ran_script = true","upsert":{"bytes_source_field":"3kb"}, "scripted_upsert" : true} + + - do: + mget: + body: + docs: + - { _index: "test", _id: "6" } + - { _index: "test", _id: "7" } + - { _index: "test", _id: "8" } + - { _index: "test", _id: "6_alias" } + - { _index: "test", _id: "7_alias" } + - { _index: "test", _id: "8_alias" } + - match: { docs.0._index: "test" } + - match: { docs.0._id: "6" } + - match: { docs.0._source.bytes_source_field: "1kb" } + - match: { docs.0._source.bytes_target_field: 1024 } + - is_false: docs.0._source.ran_script + - match: { docs.1._index: "test" } + - match: { docs.1._id: "7" } + - match: { docs.1._source.bytes_source_field: "2kb" } + - match: { docs.1._source.bytes_target_field: 2048 } + - match: { docs.2._index: "test" } + - match: { docs.2._id: "8" } + - match: { docs.2._source.bytes_source_field: "3kb" } + - match: { docs.2._source.bytes_target_field: 3072 } + - match: { docs.2._source.ran_script: true } + - match: { docs.3._index: "test" } + - match: { docs.3._id: "6_alias" } + - match: { docs.3._source.bytes_source_field: "1kb" } + - match: { docs.3._source.bytes_target_field: 1024 } + - is_false: docs.3._source.ran_script + - match: { docs.4._index: "test" } + - match: { docs.4._id: "7_alias" } + - match: { docs.4._source.bytes_source_field: "2kb" } + - match: { docs.4._source.bytes_target_field: 2048 } + - match: { docs.5._index: "test" } + - match: { docs.5._id: "8_alias" } + - match: { docs.5._source.bytes_source_field: "3kb" } + - match: { docs.5._source.bytes_target_field: 3072 } + - match: { docs.5._source.ran_script: true } + + # bad request, request pipeline can not be specified + - do: + catch: /illegal_argument_exception.*request pipeline \[pipeline\] can not override required pipeline \[my_pipeline\]/ + index: + index: test + id: 9 + pipeline: "pipeline" + body: {bytes_source_field: "1kb"} diff --git a/modules/ingest-user-agent/src/main/java/org/elasticsearch/ingest/useragent/IngestUserAgentPlugin.java b/modules/ingest-user-agent/src/main/java/org/elasticsearch/ingest/useragent/IngestUserAgentPlugin.java index 899b3eb721c..e0bd7b7207a 100644 --- a/modules/ingest-user-agent/src/main/java/org/elasticsearch/ingest/useragent/IngestUserAgentPlugin.java +++ b/modules/ingest-user-agent/src/main/java/org/elasticsearch/ingest/useragent/IngestUserAgentPlugin.java @@ -32,6 +32,7 @@ import java.nio.file.PathMatcher; import java.nio.file.StandardOpenOption; import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.stream.Stream; @@ -86,4 +87,8 @@ public class IngestUserAgentPlugin extends Plugin implements IngestPlugin { return Collections.unmodifiableMap(userAgentParsers); } + @Override + public List> getSettings() { + return 
Collections.singletonList(CACHE_SIZE_SETTING); + } } diff --git a/modules/lang-mustache/build.gradle b/modules/lang-mustache/build.gradle index 2a46bd9ed2e..486793ab561 100644 --- a/modules/lang-mustache/build.gradle +++ b/modules/lang-mustache/build.gradle @@ -24,6 +24,6 @@ esplugin { } dependencies { - compile "com.github.spullara.mustache.java:compiler:0.9.3" + compile "com.github.spullara.mustache.java:compiler:0.9.6" } diff --git a/modules/lang-mustache/licenses/compiler-0.9.3.jar.sha1 b/modules/lang-mustache/licenses/compiler-0.9.3.jar.sha1 deleted file mode 100644 index 2b0fbbc542e..00000000000 --- a/modules/lang-mustache/licenses/compiler-0.9.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2815e016c63bec4f18704ea4f5489106a5b01a99 \ No newline at end of file diff --git a/modules/lang-mustache/licenses/compiler-0.9.6.jar.sha1 b/modules/lang-mustache/licenses/compiler-0.9.6.jar.sha1 new file mode 100644 index 00000000000..9c0e5464147 --- /dev/null +++ b/modules/lang-mustache/licenses/compiler-0.9.6.jar.sha1 @@ -0,0 +1 @@ +1b8707299c34406ed0ba40bbf8513352ac4765c9 \ No newline at end of file diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessExecuteAction.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessExecuteAction.java index 1af50f4842f..ab03ba173b4 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessExecuteAction.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessExecuteAction.java @@ -570,9 +570,11 @@ public class PainlessExecuteAction extends ActionType absoluteStartMillis, null); + indexService.newQueryShardContext(0, searcher, () -> absoluteStartMillis, null); return handler.apply(context, indexReader.leaves().get(0)); } } diff --git a/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/80_script_score.yml b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/80_script_score.yml index cf55810058d..e32139c2196 100644 --- a/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/80_script_score.yml +++ b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/80_script_score.yml @@ -501,3 +501,53 @@ setup: - match: { hits.total: 2 } - match: { hits.hits.0._id : "2" } - match: { hits.hits.1._id : "1" } + + +--- +"Script Score With Highlight": + - skip: + version: " - 7.4.0" + reason: "highlight for script_score was introduced in 7.4.1" + + - do: + indices.create: + index: test_index + body: + mappings: + "properties": + "company": + "type": "text" + "reputation": + "type": "integer" + - do: + bulk: + refresh: true + body: + - '{"index": {"_index": "test_index", "_id" : "1"}}' + - '{"company": "ABC company", "reputation": 300}' + - '{"index": {"_index": "test_index", "_id" : "2"}}' + - '{"company": "ABC ABCD company", "reputation": 200}' + - '{"index": {"_index": "test_index", "_id" : "3"}}' + - '{"company": "ABCD company", "reputation": 100}' + + - do: + search: + body: + query: + script_score: + script: + source: "doc['reputation'].value" + query: + bool: + should: + - match: + company: ABC + - match: + company: ABCD + highlight: + fields: + company: {} + + - match: {hits.hits.0.highlight.company.0: "ABC company"} + - match: {hits.hits.1.highlight.company.0: "ABC ABCD company"} + - match: {hits.hits.2.highlight.company.0: "ABCD company"} diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ChildrenAggregationBuilder.java 
b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ChildrenAggregationBuilder.java index c10b025d275..29549ded441 100644 --- a/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ChildrenAggregationBuilder.java +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ChildrenAggregationBuilder.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.fielddata.plain.SortedSetDVOrdinalsIndexFieldData; import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.join.mapper.ParentIdFieldMapper; import org.elasticsearch.join.mapper.ParentJoinFieldMapper; import org.elasticsearch.search.aggregations.AggregationBuilder; @@ -39,7 +40,6 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuil import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import org.elasticsearch.search.aggregations.support.ValuesSourceType; -import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; import java.util.Map; @@ -95,29 +95,29 @@ public class ChildrenAggregationBuilder } @Override - protected ValuesSourceAggregatorFactory innerBuild(SearchContext context, - ValuesSourceConfig config, - AggregatorFactory parent, - Builder subFactoriesBuilder) throws IOException { - return new ChildrenAggregatorFactory(name, config, childFilter, parentFilter, context, parent, + protected ValuesSourceAggregatorFactory innerBuild(QueryShardContext queryShardContext, + ValuesSourceConfig config, + AggregatorFactory parent, + Builder subFactoriesBuilder) throws IOException { + return new ChildrenAggregatorFactory(name, config, childFilter, parentFilter, queryShardContext, parent, subFactoriesBuilder, metaData); } @Override - protected ValuesSourceConfig resolveConfig(SearchContext context) { + protected ValuesSourceConfig resolveConfig(QueryShardContext queryShardContext) { ValuesSourceConfig config = new ValuesSourceConfig<>(ValuesSourceType.BYTES); - joinFieldResolveConfig(context, config); + joinFieldResolveConfig(queryShardContext, config); return config; } - private void joinFieldResolveConfig(SearchContext context, ValuesSourceConfig config) { - ParentJoinFieldMapper parentJoinFieldMapper = ParentJoinFieldMapper.getMapper(context.mapperService()); + private void joinFieldResolveConfig(QueryShardContext queryShardContext, ValuesSourceConfig config) { + ParentJoinFieldMapper parentJoinFieldMapper = ParentJoinFieldMapper.getMapper(queryShardContext.getMapperService()); ParentIdFieldMapper parentIdFieldMapper = parentJoinFieldMapper.getParentIdFieldMapper(childType, false); if (parentIdFieldMapper != null) { parentFilter = parentIdFieldMapper.getParentFilter(); childFilter = parentIdFieldMapper.getChildFilter(childType); MappedFieldType fieldType = parentIdFieldMapper.fieldType(); - final SortedSetDVOrdinalsIndexFieldData fieldData = context.getForField(fieldType); + final SortedSetDVOrdinalsIndexFieldData fieldData = queryShardContext.getForField(fieldType); config.fieldContext(new FieldContext(fieldType.name(), fieldData, fieldType)); } else { config.unmapped(true); diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ChildrenAggregatorFactory.java 
b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ChildrenAggregatorFactory.java index a8cfe62edd9..57a746677f8 100644 --- a/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ChildrenAggregatorFactory.java +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ChildrenAggregatorFactory.java @@ -20,6 +20,7 @@ package org.elasticsearch.join.aggregations; import org.apache.lucene.search.Query; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; @@ -44,7 +45,7 @@ public class ChildrenAggregatorFactory extends ValuesSourceAggregatorFactory config, Query childFilter, Query parentFilter, - SearchContext context, + QueryShardContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { @@ -55,9 +56,9 @@ public class ChildrenAggregatorFactory extends ValuesSourceAggregatorFactory pipelineAggregators, Map metaData) throws IOException { - return new NonCollectingAggregator(name, context, parent, pipelineAggregators, metaData) { + return new NonCollectingAggregator(name, searchContext, parent, pipelineAggregators, metaData) { @Override public InternalAggregation buildEmptyAggregation() { return new InternalChildren(name, 0, buildEmptySubAggregations(), pipelineAggregators(), metaData()); @@ -67,17 +68,17 @@ public class ChildrenAggregatorFactory extends ValuesSourceAggregatorFactory pipelineAggregators, - Map metaData) throws IOException { + SearchContext searchContext, Aggregator parent, + boolean collectsFromSingleBucket, + List pipelineAggregators, + Map metaData) throws IOException { - long maxOrd = valuesSource.globalMaxOrd(context.searcher()); + long maxOrd = valuesSource.globalMaxOrd(searchContext.searcher()); if (collectsFromSingleBucket) { - return new ParentToChildrenAggregator(name, factories, context, parent, childFilter, + return new ParentToChildrenAggregator(name, factories, searchContext, parent, childFilter, parentFilter, valuesSource, maxOrd, pipelineAggregators, metaData); } else { - return asMultiBucketAggregator(this, context, parent); + return asMultiBucketAggregator(this, searchContext, parent); } } } diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentAggregationBuilder.java b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentAggregationBuilder.java index 83575d6d527..4386c417677 100644 --- a/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentAggregationBuilder.java +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentAggregationBuilder.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.fielddata.plain.SortedSetDVOrdinalsIndexFieldData; import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.join.mapper.ParentIdFieldMapper; import org.elasticsearch.join.mapper.ParentJoinFieldMapper; import org.elasticsearch.search.aggregations.AggregationBuilder; @@ -39,7 +40,6 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuil import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; import 
org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import org.elasticsearch.search.aggregations.support.ValuesSourceType; -import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; import java.util.Map; @@ -95,29 +95,29 @@ public class ParentAggregationBuilder } @Override - protected ValuesSourceAggregatorFactory innerBuild(SearchContext context, - ValuesSourceConfig config, - AggregatorFactory parent, - Builder subFactoriesBuilder) throws IOException { - return new ParentAggregatorFactory(name, config, childFilter, parentFilter, context, parent, + protected ValuesSourceAggregatorFactory innerBuild(QueryShardContext queryShardContext, + ValuesSourceConfig config, + AggregatorFactory parent, + Builder subFactoriesBuilder) throws IOException { + return new ParentAggregatorFactory(name, config, childFilter, parentFilter, queryShardContext, parent, subFactoriesBuilder, metaData); } @Override - protected ValuesSourceConfig resolveConfig(SearchContext context) { + protected ValuesSourceConfig resolveConfig(QueryShardContext queryShardContext) { ValuesSourceConfig config = new ValuesSourceConfig<>(ValuesSourceType.BYTES); - joinFieldResolveConfig(context, config); + joinFieldResolveConfig(queryShardContext, config); return config; } - private void joinFieldResolveConfig(SearchContext context, ValuesSourceConfig config) { - ParentJoinFieldMapper parentJoinFieldMapper = ParentJoinFieldMapper.getMapper(context.mapperService()); + private void joinFieldResolveConfig(QueryShardContext queryShardContext, ValuesSourceConfig config) { + ParentJoinFieldMapper parentJoinFieldMapper = ParentJoinFieldMapper.getMapper(queryShardContext.getMapperService()); ParentIdFieldMapper parentIdFieldMapper = parentJoinFieldMapper.getParentIdFieldMapper(childType, false); if (parentIdFieldMapper != null) { parentFilter = parentIdFieldMapper.getParentFilter(); childFilter = parentIdFieldMapper.getChildFilter(childType); MappedFieldType fieldType = parentIdFieldMapper.fieldType(); - final SortedSetDVOrdinalsIndexFieldData fieldData = context.getForField(fieldType); + final SortedSetDVOrdinalsIndexFieldData fieldData = queryShardContext.getForField(fieldType); config.fieldContext(new FieldContext(fieldType.name(), fieldData, fieldType)); } else { config.unmapped(true); diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentAggregatorFactory.java b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentAggregatorFactory.java index fd87744dcf2..dc64d0308cd 100644 --- a/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentAggregatorFactory.java +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentAggregatorFactory.java @@ -20,6 +20,7 @@ package org.elasticsearch.join.aggregations; import org.apache.lucene.search.Query; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; @@ -44,20 +45,20 @@ public class ParentAggregatorFactory extends ValuesSourceAggregatorFactory config, Query childFilter, Query parentFilter, - SearchContext context, + QueryShardContext queryShardContext, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { - super(name, config, context, parent, subFactoriesBuilder, metaData); + super(name, config, queryShardContext, 
parent, subFactoriesBuilder, metaData); this.childFilter = childFilter; this.parentFilter = parentFilter; } @Override - protected Aggregator createUnmapped(Aggregator parent, + protected Aggregator createUnmapped(SearchContext searchContext, Aggregator parent, List pipelineAggregators, Map metaData) throws IOException { - return new NonCollectingAggregator(name, context, parent, pipelineAggregators, metaData) { + return new NonCollectingAggregator(name, searchContext, parent, pipelineAggregators, metaData) { @Override public InternalAggregation buildEmptyAggregation() { return new InternalParent(name, 0, buildEmptySubAggregations(), pipelineAggregators(), metaData()); @@ -67,17 +68,17 @@ public class ParentAggregatorFactory extends ValuesSourceAggregatorFactory pipelineAggregators, Map metaData) throws IOException { - long maxOrd = valuesSource.globalMaxOrd(context.searcher()); + long maxOrd = valuesSource.globalMaxOrd(searchContext.searcher()); if (collectsFromSingleBucket) { - return new ChildrenToParentAggregator(name, factories, context, children, childFilter, + return new ChildrenToParentAggregator(name, factories, searchContext, children, childFilter, parentFilter, valuesSource, maxOrd, pipelineAggregators, metaData); } else { - return asMultiBucketAggregator(this, context, children); + return asMultiBucketAggregator(this, searchContext, children); } } } diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/query/ParentChildInnerHitContextBuilder.java b/modules/parent-join/src/main/java/org/elasticsearch/join/query/ParentChildInnerHitContextBuilder.java index 090798eb51c..b9f6ed85db3 100644 --- a/modules/parent-join/src/main/java/org/elasticsearch/join/query/ParentChildInnerHitContextBuilder.java +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/query/ParentChildInnerHitContextBuilder.java @@ -67,20 +67,26 @@ class ParentChildInnerHitContextBuilder extends InnerHitContextBuilder { } @Override - protected void doBuild(SearchContext context, InnerHitsContext innerHitsContext) throws IOException { + public void doValidate(QueryShardContext queryShardContext) { + if (ParentJoinFieldMapper.getMapper(queryShardContext.getMapperService()) == null + && innerHitBuilder.isIgnoreUnmapped() == false) { + throw new IllegalStateException("no join field has been configured"); + } + } + + @Override + public void build(SearchContext context, InnerHitsContext innerHitsContext) throws IOException { QueryShardContext queryShardContext = context.getQueryShardContext(); ParentJoinFieldMapper joinFieldMapper = ParentJoinFieldMapper.getMapper(context.mapperService()); - if (joinFieldMapper != null) { - String name = innerHitBuilder.getName() != null ? innerHitBuilder.getName() : typeName; - JoinFieldInnerHitSubContext joinFieldInnerHits = new JoinFieldInnerHitSubContext(name, context, typeName, - fetchChildInnerHits, joinFieldMapper); - setupInnerHitsContext(queryShardContext, joinFieldInnerHits); - innerHitsContext.addInnerHitDefinition(joinFieldInnerHits); - } else { - if (innerHitBuilder.isIgnoreUnmapped() == false) { - throw new IllegalStateException("no join field has been configured"); - } + if (joinFieldMapper == null) { + assert innerHitBuilder.isIgnoreUnmapped() : "should be validated first"; + return; } + String name = innerHitBuilder.getName() != null ? 
innerHitBuilder.getName() : typeName; + JoinFieldInnerHitSubContext joinFieldInnerHits = + new JoinFieldInnerHitSubContext(name, context, typeName, fetchChildInnerHits, joinFieldMapper); + setupInnerHitsContext(queryShardContext, joinFieldInnerHits); + innerHitsContext.addInnerHitDefinition(joinFieldInnerHits); } static final class JoinFieldInnerHitSubContext extends InnerHitsContext.InnerHitSubContext { @@ -88,8 +94,11 @@ class ParentChildInnerHitContextBuilder extends InnerHitContextBuilder { private final boolean fetchChildInnerHits; private final ParentJoinFieldMapper joinFieldMapper; - JoinFieldInnerHitSubContext(String name, SearchContext context, String typeName, boolean fetchChildInnerHits, - ParentJoinFieldMapper joinFieldMapper) { + JoinFieldInnerHitSubContext(String name, + SearchContext context, + String typeName, + boolean fetchChildInnerHits, + ParentJoinFieldMapper joinFieldMapper) { super(name, context); this.typeName = typeName; this.fetchChildInnerHits = fetchChildInnerHits; @@ -102,13 +111,13 @@ class ParentChildInnerHitContextBuilder extends InnerHitContextBuilder { TopDocsAndMaxScore[] result = new TopDocsAndMaxScore[hits.length]; for (int i = 0; i < hits.length; i++) { SearchHit hit = hits[i]; - String joinName = getSortedDocValue(joinFieldMapper.name(), context, hit.docId()); + String joinName = getSortedDocValue(joinFieldMapper.name(), this, hit.docId()); if (joinName == null) { result[i] = new TopDocsAndMaxScore(Lucene.EMPTY_TOP_DOCS, Float.NaN); continue; } - QueryShardContext qsc = context.getQueryShardContext(); + QueryShardContext qsc = getQueryShardContext(); ParentIdFieldMapper parentIdFieldMapper = joinFieldMapper.getParentIdFieldMapper(typeName, fetchChildInnerHits == false); if (parentIdFieldMapper == null) { @@ -126,14 +135,14 @@ class ParentChildInnerHitContextBuilder extends InnerHitContextBuilder { .add(joinFieldMapper.fieldType().termQuery(typeName, qsc), BooleanClause.Occur.FILTER) .build(); } else { - String parentId = getSortedDocValue(parentIdFieldMapper.name(), context, hit.docId()); - q = context.mapperService().fullName(IdFieldMapper.NAME).termQuery(parentId, qsc); + String parentId = getSortedDocValue(parentIdFieldMapper.name(), this, hit.docId()); + q = mapperService().fullName(IdFieldMapper.NAME).termQuery(parentId, qsc); } - Weight weight = context.searcher().createWeight(context.searcher().rewrite(q), ScoreMode.COMPLETE_NO_SCORES, 1f); + Weight weight = searcher().createWeight(searcher().rewrite(q), ScoreMode.COMPLETE_NO_SCORES, 1f); if (size() == 0) { TotalHitCountCollector totalHitCountCollector = new TotalHitCountCollector(); - for (LeafReaderContext ctx : context.searcher().getIndexReader().leaves()) { + for (LeafReaderContext ctx : searcher().getIndexReader().leaves()) { intersect(weight, innerHitQueryWeight, totalHitCountCollector, ctx); } result[i] = new TopDocsAndMaxScore( @@ -142,7 +151,7 @@ class ParentChildInnerHitContextBuilder extends InnerHitContextBuilder { Lucene.EMPTY_SCORE_DOCS ), Float.NaN); } else { - int topN = Math.min(from() + size(), context.searcher().getIndexReader().maxDoc()); + int topN = Math.min(from() + size(), searcher().getIndexReader().maxDoc()); TopDocsCollector topDocsCollector; MaxScoreCollector maxScoreCollector = null; if (sort() != null) { @@ -155,7 +164,7 @@ class ParentChildInnerHitContextBuilder extends InnerHitContextBuilder { maxScoreCollector = new MaxScoreCollector(); } try { - for (LeafReaderContext ctx : context.searcher().getIndexReader().leaves()) { + for (LeafReaderContext ctx : 
searcher().getIndexReader().leaves()) { intersect(weight, innerHitQueryWeight, MultiCollector.wrap(topDocsCollector, maxScoreCollector), ctx); } } finally { diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java b/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java index 8f38b8b9ae5..2e682eda733 100644 --- a/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java +++ b/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java @@ -183,14 +183,13 @@ public class HasChildQueryBuilderTests extends AbstractQueryTestCase innerHitBuilders = new HashMap<>(); InnerHitContextBuilder.extractInnerHits(queryBuilder, innerHitBuilders); + final InnerHitsContext innerHitsContext = new InnerHitsContext(); for (InnerHitContextBuilder builder : innerHitBuilders.values()) { - builder.build(searchContext, searchContext.innerHits()); + builder.build(searchContext, innerHitsContext); } - assertNotNull(searchContext.innerHits()); - assertEquals(1, searchContext.innerHits().getInnerHits().size()); - assertTrue(searchContext.innerHits().getInnerHits().containsKey(queryBuilder.innerHit().getName())); - InnerHitsContext.InnerHitSubContext innerHits = - searchContext.innerHits().getInnerHits().get(queryBuilder.innerHit().getName()); + assertEquals(1, innerHitsContext.getInnerHits().size()); + assertTrue(innerHitsContext.getInnerHits().containsKey(queryBuilder.innerHit().getName())); + InnerHitsContext.InnerHitSubContext innerHits = innerHitsContext.getInnerHits().get(queryBuilder.innerHit().getName()); assertEquals(innerHits.size(), queryBuilder.innerHit().getSize()); assertEquals(innerHits.sort().sort.getSort().length, 1); assertEquals(innerHits.sort().sort.getSort()[0].getField(), STRING_FIELD_NAME_2); diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasParentQueryBuilderTests.java b/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasParentQueryBuilderTests.java index ea77ad80799..73d29314130 100644 --- a/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasParentQueryBuilderTests.java +++ b/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasParentQueryBuilderTests.java @@ -148,17 +148,15 @@ public class HasParentQueryBuilderTests extends AbstractQueryTestCase innerHitBuilders = new HashMap<>(); InnerHitContextBuilder.extractInnerHits(queryBuilder, innerHitBuilders); + final InnerHitsContext innerHitsContext = new InnerHitsContext(); for (InnerHitContextBuilder builder : innerHitBuilders.values()) { - builder.build(searchContext, searchContext.innerHits()); + builder.build(searchContext, innerHitsContext); } - assertNotNull(searchContext.innerHits()); - assertEquals(1, searchContext.innerHits().getInnerHits().size()); - assertTrue(searchContext.innerHits().getInnerHits().containsKey(queryBuilder.innerHit().getName())); - InnerHitsContext.InnerHitSubContext innerHits = searchContext.innerHits() - .getInnerHits().get(queryBuilder.innerHit().getName()); + assertEquals(1, innerHitsContext.getInnerHits().size()); + assertTrue(innerHitsContext.getInnerHits().containsKey(queryBuilder.innerHit().getName())); + InnerHitsContext.InnerHitSubContext innerHits = innerHitsContext.getInnerHits().get(queryBuilder.innerHit().getName()); assertEquals(innerHits.size(), queryBuilder.innerHit().getSize()); assertEquals(innerHits.sort().sort.getSort().length, 1); 
assertEquals(innerHits.sort().sort.getSort()[0].getField(), STRING_FIELD_NAME_2); diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java index f1747d19775..8db8a549c1e 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java @@ -146,7 +146,6 @@ public class PercolatorFieldMapperTests extends ESSingleNodeTestCase { mapperService = indexService.mapperService(); String mapper = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("doc") - .startObject("_field_names").field("enabled", false).endObject() // makes testing easier .startObject("properties") .startObject("field").field("type", "text").endObject() .startObject("field1").field("type", "text").endObject() @@ -322,7 +321,7 @@ public class PercolatorFieldMapperTests extends ESSingleNodeTestCase { ParseContext.Document document = parseContext.doc(); PercolatorFieldMapper.FieldType fieldType = (PercolatorFieldMapper.FieldType) fieldMapper.fieldType(); - assertThat(document.getFields().size(), equalTo(3)); + assertThat(document.getFields().size(), equalTo(4)); assertThat(document.getFields().get(0).binaryValue().utf8ToString(), equalTo("field\u0000term")); assertThat(document.getField(fieldType.extractionResultField.name()).stringValue(), equalTo(EXTRACTION_PARTIAL)); } @@ -610,7 +609,6 @@ public class PercolatorFieldMapperTests extends ESSingleNodeTestCase { public void testMultiplePercolatorFields() throws Exception { String typeName = "doc"; String percolatorMapper = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject(typeName) - .startObject("_field_names").field("enabled", false).endObject() // makes testing easier .startObject("properties") .startObject("query_field1").field("type", "percolator").endObject() .startObject("query_field2").field("type", "percolator").endObject() @@ -625,7 +623,7 @@ public class PercolatorFieldMapperTests extends ESSingleNodeTestCase { .field("query_field2", queryBuilder) .endObject()), XContentType.JSON)); - assertThat(doc.rootDoc().getFields().size(), equalTo(14)); // also includes all other meta fields + assertThat(doc.rootDoc().getFields().size(), equalTo(16)); // also includes all other meta fields BytesRef queryBuilderAsBytes = doc.rootDoc().getField("query_field1.query_builder_field").binaryValue(); assertQueryBuilder(queryBuilderAsBytes, queryBuilder); @@ -637,7 +635,6 @@ public class PercolatorFieldMapperTests extends ESSingleNodeTestCase { public void testNestedPercolatorField() throws Exception { String typeName = "doc"; String percolatorMapper = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject(typeName) - .startObject("_field_names").field("enabled", false).endObject() // makes testing easier .startObject("properties") .startObject("object_field") .field("type", "object") @@ -655,7 +652,7 @@ public class PercolatorFieldMapperTests extends ESSingleNodeTestCase { .field("query_field", queryBuilder) .endObject().endObject()), XContentType.JSON)); - assertThat(doc.rootDoc().getFields().size(), equalTo(10)); // also includes all other meta fields + assertThat(doc.rootDoc().getFields().size(), equalTo(12)); // also includes all other meta fields BytesRef queryBuilderAsBytes = 
doc.rootDoc().getField("object_field.query_field.query_builder_field").binaryValue(); assertQueryBuilder(queryBuilderAsBytes, queryBuilder); @@ -666,7 +663,7 @@ public class PercolatorFieldMapperTests extends ESSingleNodeTestCase { .endArray() .endObject()), XContentType.JSON)); - assertThat(doc.rootDoc().getFields().size(), equalTo(10)); // also includes all other meta fields + assertThat(doc.rootDoc().getFields().size(), equalTo(12)); // also includes all other meta fields queryBuilderAsBytes = doc.rootDoc().getField("object_field.query_field.query_builder_field").binaryValue(); assertQueryBuilder(queryBuilderAsBytes, queryBuilder); diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorQuerySearchTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorQuerySearchTests.java index c959ff52b19..7193a696b47 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorQuerySearchTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorQuerySearchTests.java @@ -258,7 +258,7 @@ public class PercolatorQuerySearchTests extends ESSingleNodeTestCase { try (Engine.Searcher searcher = indexService.getShard(0).acquireSearcher("test")) { long[] currentTime = new long[] {System.currentTimeMillis()}; QueryShardContext queryShardContext = - indexService.newQueryShardContext(0, searcher.getIndexReader(), () -> currentTime[0], null); + indexService.newQueryShardContext(0, searcher, () -> currentTime[0], null); BytesReference source = BytesReference.bytes(jsonBuilder().startObject() .field("field1", "value") diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java index 0afd6829674..1e9aacb5afa 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java @@ -878,7 +878,7 @@ public class AsyncBulkByScrollActionTests extends ESTestCase { new IndexResponse( shardId, index.type(), - index.id(), + index.id() == null ? "dummy_id" : index.id(), randomInt(20), randomIntBetween(1, 16), randomIntBetween(0, Integer.MAX_VALUE), diff --git a/modules/repository-url/build.gradle b/modules/repository-url/build.gradle index 933b429ef73..a73af0d2cd0 100644 --- a/modules/repository-url/build.gradle +++ b/modules/repository-url/build.gradle @@ -16,6 +16,8 @@ * specific language governing permissions and limitations * under the License. 
*/ + +import org.elasticsearch.gradle.PropertyNormalization import org.elasticsearch.gradle.test.AntFixture esplugin { @@ -42,7 +44,7 @@ integTest { } testClusters.integTest { // repositoryDir is used by a FS repository to create snapshots - setting 'path.repo', "${repositoryDir.absolutePath}" + setting 'path.repo', "${repositoryDir.absolutePath}", PropertyNormalization.IGNORE_VALUE // repositoryDir is used by two URL repositories to restore snapshots - setting 'repositories.url.allowed_urls', { "http://snapshot.test*,http://${urlFixture.addressAndPort}" } + setting 'repositories.url.allowed_urls', { "http://snapshot.test*,http://${urlFixture.addressAndPort}" }, PropertyNormalization.IGNORE_VALUE } diff --git a/plugins/discovery-azure-classic/licenses/httpclient-4.5.10.jar.sha1 b/plugins/discovery-azure-classic/licenses/httpclient-4.5.10.jar.sha1 new file mode 100644 index 00000000000..b708efd0dd5 --- /dev/null +++ b/plugins/discovery-azure-classic/licenses/httpclient-4.5.10.jar.sha1 @@ -0,0 +1 @@ +7ca2e4276f4ef95e4db725a8cd4a1d1e7585b9e5 \ No newline at end of file diff --git a/plugins/discovery-azure-classic/licenses/httpclient-4.5.8.jar.sha1 b/plugins/discovery-azure-classic/licenses/httpclient-4.5.8.jar.sha1 deleted file mode 100644 index 73f0d30c709..00000000000 --- a/plugins/discovery-azure-classic/licenses/httpclient-4.5.8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c27c9d6f15435dc2b6947112027b418b0eef32b9 \ No newline at end of file diff --git a/plugins/discovery-azure-classic/licenses/httpcore-4.4.11.jar.sha1 b/plugins/discovery-azure-classic/licenses/httpcore-4.4.11.jar.sha1 deleted file mode 100644 index 6d64372bfcc..00000000000 --- a/plugins/discovery-azure-classic/licenses/httpcore-4.4.11.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -de748cf874e4e193b42eceea9fe5574fabb9d4df \ No newline at end of file diff --git a/plugins/discovery-azure-classic/licenses/httpcore-4.4.12.jar.sha1 b/plugins/discovery-azure-classic/licenses/httpcore-4.4.12.jar.sha1 new file mode 100644 index 00000000000..3c046171b30 --- /dev/null +++ b/plugins/discovery-azure-classic/licenses/httpcore-4.4.12.jar.sha1 @@ -0,0 +1 @@ +21ebaf6d532bc350ba95bd81938fa5f0e511c132 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/httpclient-4.5.10.jar.sha1 b/plugins/discovery-ec2/licenses/httpclient-4.5.10.jar.sha1 new file mode 100644 index 00000000000..b708efd0dd5 --- /dev/null +++ b/plugins/discovery-ec2/licenses/httpclient-4.5.10.jar.sha1 @@ -0,0 +1 @@ +7ca2e4276f4ef95e4db725a8cd4a1d1e7585b9e5 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/httpclient-4.5.8.jar.sha1 b/plugins/discovery-ec2/licenses/httpclient-4.5.8.jar.sha1 deleted file mode 100644 index 73f0d30c709..00000000000 --- a/plugins/discovery-ec2/licenses/httpclient-4.5.8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c27c9d6f15435dc2b6947112027b418b0eef32b9 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/httpcore-4.4.11.jar.sha1 b/plugins/discovery-ec2/licenses/httpcore-4.4.11.jar.sha1 deleted file mode 100644 index 6d64372bfcc..00000000000 --- a/plugins/discovery-ec2/licenses/httpcore-4.4.11.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -de748cf874e4e193b42eceea9fe5574fabb9d4df \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/httpcore-4.4.12.jar.sha1 b/plugins/discovery-ec2/licenses/httpcore-4.4.12.jar.sha1 new file mode 100644 index 00000000000..3c046171b30 --- /dev/null +++ b/plugins/discovery-ec2/licenses/httpcore-4.4.12.jar.sha1 @@ -0,0 +1 @@ +21ebaf6d532bc350ba95bd81938fa5f0e511c132 \ No 
newline at end of file diff --git a/plugins/discovery-gce/licenses/httpclient-4.5.10.jar.sha1 b/plugins/discovery-gce/licenses/httpclient-4.5.10.jar.sha1 new file mode 100644 index 00000000000..b708efd0dd5 --- /dev/null +++ b/plugins/discovery-gce/licenses/httpclient-4.5.10.jar.sha1 @@ -0,0 +1 @@ +7ca2e4276f4ef95e4db725a8cd4a1d1e7585b9e5 \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/httpclient-4.5.8.jar.sha1 b/plugins/discovery-gce/licenses/httpclient-4.5.8.jar.sha1 deleted file mode 100644 index 73f0d30c709..00000000000 --- a/plugins/discovery-gce/licenses/httpclient-4.5.8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c27c9d6f15435dc2b6947112027b418b0eef32b9 \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/httpcore-4.4.11.jar.sha1 b/plugins/discovery-gce/licenses/httpcore-4.4.11.jar.sha1 deleted file mode 100644 index 6d64372bfcc..00000000000 --- a/plugins/discovery-gce/licenses/httpcore-4.4.11.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -de748cf874e4e193b42eceea9fe5574fabb9d4df \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/httpcore-4.4.12.jar.sha1 b/plugins/discovery-gce/licenses/httpcore-4.4.12.jar.sha1 new file mode 100644 index 00000000000..3c046171b30 --- /dev/null +++ b/plugins/discovery-gce/licenses/httpcore-4.4.12.jar.sha1 @@ -0,0 +1 @@ +21ebaf6d532bc350ba95bd81938fa5f0e511c132 \ No newline at end of file diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java index e4a7e3acb65..968d8396f7e 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java @@ -68,6 +68,10 @@ public class AzureBlobStore implements BlobStore { return container; } + public AzureStorageService getService() { + return service; + } + /** * Gets the configured {@link LocationMode} for the Azure storage requests. 
*/ diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryPlugin.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryPlugin.java index a9b236a48a0..8815e738f9c 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryPlugin.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryPlugin.java @@ -50,7 +50,12 @@ public class AzureRepositoryPlugin extends Plugin implements RepositoryPlugin, R public AzureRepositoryPlugin(Settings settings) { // eagerly load client settings so that secure settings are read - this.azureStoreService = new AzureStorageService(settings); + this.azureStoreService = createAzureStoreService(settings); + } + + // non-static, package private for testing + AzureStorageService createAzureStoreService(final Settings settings) { + return new AzureStorageService(settings); } @Override diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java index ef34c533501..fbdac39b9f6 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java @@ -24,11 +24,13 @@ import com.microsoft.azure.storage.CloudStorageAccount; import com.microsoft.azure.storage.OperationContext; import com.microsoft.azure.storage.RetryExponentialRetry; import com.microsoft.azure.storage.RetryPolicy; +import com.microsoft.azure.storage.RetryPolicyFactory; import com.microsoft.azure.storage.StorageErrorCodeStrings; import com.microsoft.azure.storage.StorageException; import com.microsoft.azure.storage.blob.BlobInputStream; import com.microsoft.azure.storage.blob.BlobListingDetails; import com.microsoft.azure.storage.blob.BlobProperties; +import com.microsoft.azure.storage.blob.BlobRequestOptions; import com.microsoft.azure.storage.blob.CloudBlob; import com.microsoft.azure.storage.blob.CloudBlobClient; import com.microsoft.azure.storage.blob.CloudBlobContainer; @@ -111,7 +113,7 @@ public class AzureStorageService { } } - private static CloudBlobClient buildClient(AzureStorageSettings azureStorageSettings) throws InvalidKeyException, URISyntaxException { + private CloudBlobClient buildClient(AzureStorageSettings azureStorageSettings) throws InvalidKeyException, URISyntaxException { final CloudBlobClient client = createClient(azureStorageSettings); // Set timeout option if the user sets cloud.azure.storage.timeout or // cloud.azure.storage.xxx.timeout (it's negative by default) @@ -123,12 +125,16 @@ public class AzureStorageService { client.getDefaultRequestOptions().setTimeoutIntervalInMs((int) timeout); } // We define a default exponential retry policy - client.getDefaultRequestOptions() - .setRetryPolicyFactory(new RetryExponentialRetry(RetryPolicy.DEFAULT_CLIENT_BACKOFF, azureStorageSettings.getMaxRetries())); + client.getDefaultRequestOptions().setRetryPolicyFactory(createRetryPolicy(azureStorageSettings)); client.getDefaultRequestOptions().setLocationMode(azureStorageSettings.getLocationMode()); return client; } + // non-static, package private for testing + RetryPolicyFactory createRetryPolicy(final AzureStorageSettings azureStorageSettings) { + return new 
RetryExponentialRetry(RetryPolicy.DEFAULT_CLIENT_BACKOFF, azureStorageSettings.getMaxRetries()); + } + private static CloudBlobClient createClient(AzureStorageSettings azureStorageSettings) throws InvalidKeyException, URISyntaxException { final String connectionString = azureStorageSettings.getConnectString(); return CloudStorageAccount.parse(connectionString).createCloudBlobClient(); @@ -324,7 +330,7 @@ public class AzureStorageService { final AccessCondition accessCondition = failIfAlreadyExists ? AccessCondition.generateIfNotExistsCondition() : AccessCondition.generateEmptyCondition(); SocketAccess.doPrivilegedVoidException(() -> - blob.upload(inputStream, blobSize, accessCondition, null, client.v2().get())); + blob.upload(inputStream, blobSize, accessCondition, getBlobRequestOptionsForWriteBlob(), client.v2().get())); } catch (final StorageException se) { if (failIfAlreadyExists && se.getHttpStatusCode() == HttpURLConnection.HTTP_CONFLICT && StorageErrorCodeStrings.BLOB_ALREADY_EXISTS.equals(se.getErrorCode())) { @@ -335,6 +341,11 @@ public class AzureStorageService { logger.trace(() -> new ParameterizedMessage("writeBlob({}, stream, {}) - done", blobName, blobSize)); } + // package private for testing + BlobRequestOptions getBlobRequestOptionsForWriteBlob() { + return null; + } + static InputStream giveSocketPermissionsToStream(final InputStream stream) { return new InputStream() { @Override diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryTests.java index 6e3de990151..2f12b1c61ff 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryTests.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryTests.java @@ -18,63 +18,43 @@ */ package org.elasticsearch.repositories.azure; +import com.microsoft.azure.storage.Constants; +import com.microsoft.azure.storage.RetryExponentialRetry; +import com.microsoft.azure.storage.RetryPolicyFactory; +import com.microsoft.azure.storage.blob.BlobRequestOptions; import com.sun.net.httpserver.HttpExchange; import com.sun.net.httpserver.HttpHandler; -import com.sun.net.httpserver.HttpServer; import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.Streams; -import org.elasticsearch.common.network.InetAddresses; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.MockSecureSettings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.mocksocket.MockHttpServer; +import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.repositories.blobstore.ESBlobStoreRepositoryIntegTestCase; +import org.elasticsearch.repositories.blobstore.ESMockAPIBasedRepositoryIntegTestCase; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.rest.RestUtils; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.BeforeClass; +import java.io.ByteArrayOutputStream; import java.io.IOException; -import java.net.InetAddress; -import java.net.InetSocketAddress; +import java.io.InputStreamReader; import java.nio.charset.StandardCharsets; +import java.util.Arrays; import java.util.Base64; import 
java.util.Collection; import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import java.util.stream.Collectors; @SuppressForbidden(reason = "this test uses a HttpServer to emulate an Azure endpoint") -public class AzureBlobStoreRepositoryTests extends ESBlobStoreRepositoryIntegTestCase { - - private static HttpServer httpServer; - - @BeforeClass - public static void startHttpServer() throws Exception { - httpServer = MockHttpServer.createHttp(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0), 0); - httpServer.start(); - } - - @Before - public void setUpHttpServer() { - httpServer.createContext("/container", new InternalHttpHandler()); - } - - @AfterClass - public static void stopHttpServer() { - httpServer.stop(0); - httpServer = null; - } - - @After - public void tearDownHttpServer() { - httpServer.removeContext("/container"); - } +public class AzureBlobStoreRepositoryTests extends ESMockAPIBasedRepositoryIntegTestCase { @Override protected String repositoryType() { @@ -84,6 +64,7 @@ public class AzureBlobStoreRepositoryTests extends ESBlobStoreRepositoryIntegTes @Override protected Settings repositorySettings() { return Settings.builder() + .put(super.repositorySettings()) .put(AzureRepository.Repository.CONTAINER_SETTING.getKey(), "container") .put(AzureStorageSettings.ACCOUNT_SETTING.getKey(), "test") .build(); @@ -91,7 +72,17 @@ public class AzureBlobStoreRepositoryTests extends ESBlobStoreRepositoryIntegTes @Override protected Collection> nodePlugins() { - return Collections.singletonList(AzureRepositoryPlugin.class); + return Collections.singletonList(TestAzureRepositoryPlugin.class); + } + + @Override + protected Map createHttpHandlers() { + return Collections.singletonMap("/container", new InternalHttpHandler()); + } + + @Override + protected HttpHandler createErroneousHttpHandler(final HttpHandler delegate) { + return new AzureErroneousHttpHandler(delegate, randomIntBetween(2, 3)); } @Override @@ -101,10 +92,7 @@ public class AzureBlobStoreRepositoryTests extends ESBlobStoreRepositoryIntegTes secureSettings.setString(AzureStorageSettings.ACCOUNT_SETTING.getConcreteSettingForNamespace("test").getKey(), "account"); secureSettings.setString(AzureStorageSettings.KEY_SETTING.getConcreteSettingForNamespace("test").getKey(), key); - final InetSocketAddress address = httpServer.getAddress(); - final String endpoint = "ignored;DefaultEndpointsProtocol=http;BlobEndpoint=http://" - + InetAddresses.toUriString(address.getAddress()) + ":" + address.getPort(); - + final String endpoint = "ignored;DefaultEndpointsProtocol=http;BlobEndpoint=" + httpServerUrl(); return Settings.builder() .put(super.nodeSettings(nodeOrdinal)) .put(AzureStorageSettings.ENDPOINT_SUFFIX_SETTING.getConcreteSettingForNamespace("test").getKey(), endpoint) @@ -112,6 +100,34 @@ public class AzureBlobStoreRepositoryTests extends ESBlobStoreRepositoryIntegTes .build(); } + /** + * AzureRepositoryPlugin that allows to set low values for the Azure's client retry policy + * and for BlobRequestOptions#getSingleBlobPutThresholdInBytes(). 
+ */ + public static class TestAzureRepositoryPlugin extends AzureRepositoryPlugin { + + public TestAzureRepositoryPlugin(Settings settings) { + super(settings); + } + + @Override + AzureStorageService createAzureStoreService(final Settings settings) { + return new AzureStorageService(settings) { + @Override + RetryPolicyFactory createRetryPolicy(final AzureStorageSettings azureStorageSettings) { + return new RetryExponentialRetry(1, 100, 500, azureStorageSettings.getMaxRetries()); + } + + @Override + BlobRequestOptions getBlobRequestOptionsForWriteBlob() { + BlobRequestOptions options = new BlobRequestOptions(); + options.setSingleBlobPutThresholdInBytes(Math.toIntExact(ByteSizeUnit.MB.toBytes(1))); + return options; + } + }; + } + } + /** * Minimal HTTP handler that acts as an Azure compliant server */ @@ -124,12 +140,36 @@ public class AzureBlobStoreRepositoryTests extends ESBlobStoreRepositoryIntegTes public void handle(final HttpExchange exchange) throws IOException { final String request = exchange.getRequestMethod() + " " + exchange.getRequestURI().toString(); try { - if (Regex.simpleMatch("PUT /container/*", request)) { - blobs.put(exchange.getRequestURI().toString(), Streams.readFully(exchange.getRequestBody())); + if (Regex.simpleMatch("PUT /container/*blockid=*", request)) { + final Map params = new HashMap<>(); + RestUtils.decodeQueryString(exchange.getRequestURI().getQuery(), 0, params); + + final String blockId = params.get("blockid"); + blobs.put(blockId, Streams.readFully(exchange.getRequestBody())); + exchange.sendResponseHeaders(RestStatus.CREATED.getStatus(), -1); + + } else if (Regex.simpleMatch("PUT /container/*comp=blocklist*", request)) { + final String blockList = Streams.copyToString(new InputStreamReader(exchange.getRequestBody(), StandardCharsets.UTF_8)); + final List blockIds = Arrays.stream(blockList.split("")) + .filter(line -> line.contains("")) + .map(line -> line.substring(0, line.indexOf(""))) + .collect(Collectors.toList()); + + final ByteArrayOutputStream blob = new ByteArrayOutputStream(); + for (String blockId : blockIds) { + BytesReference block = blobs.remove(blockId); + assert block != null; + block.writeTo(blob); + } + blobs.put(exchange.getRequestURI().getPath(), new BytesArray(blob.toByteArray())); + exchange.sendResponseHeaders(RestStatus.CREATED.getStatus(), -1); + + } else if (Regex.simpleMatch("PUT /container/*", request)) { + blobs.put(exchange.getRequestURI().getPath(), Streams.readFully(exchange.getRequestBody())); exchange.sendResponseHeaders(RestStatus.CREATED.getStatus(), -1); } else if (Regex.simpleMatch("HEAD /container/*", request)) { - BytesReference blob = blobs.get(exchange.getRequestURI().toString()); + final BytesReference blob = blobs.get(exchange.getRequestURI().getPath()); if (blob == null) { exchange.sendResponseHeaders(RestStatus.NOT_FOUND.getStatus(), -1); return; @@ -139,20 +179,28 @@ public class AzureBlobStoreRepositoryTests extends ESBlobStoreRepositoryIntegTes exchange.sendResponseHeaders(RestStatus.OK.getStatus(), -1); } else if (Regex.simpleMatch("GET /container/*", request)) { - final BytesReference blob = blobs.get(exchange.getRequestURI().toString()); + final BytesReference blob = blobs.get(exchange.getRequestURI().getPath()); if (blob == null) { exchange.sendResponseHeaders(RestStatus.NOT_FOUND.getStatus(), -1); return; } + + final String range = exchange.getRequestHeaders().getFirst(Constants.HeaderConstants.STORAGE_RANGE_HEADER); + final Matcher matcher = 
Pattern.compile("^bytes=([0-9]+)-([0-9]+)$").matcher(range); + assertTrue(matcher.matches()); + + final int start = Integer.parseInt(matcher.group(1)); + final int length = Integer.parseInt(matcher.group(2)) - start + 1; + exchange.getResponseHeaders().add("Content-Type", "application/octet-stream"); - exchange.getResponseHeaders().add("x-ms-blob-content-length", String.valueOf(blob.length())); + exchange.getResponseHeaders().add("x-ms-blob-content-length", String.valueOf(length)); exchange.getResponseHeaders().add("x-ms-blob-type", "blockblob"); - exchange.sendResponseHeaders(RestStatus.OK.getStatus(), blob.length()); - blob.writeTo(exchange.getResponseBody()); + exchange.sendResponseHeaders(RestStatus.OK.getStatus(), length); + exchange.getResponseBody().write(blob.toBytesRef().bytes, start, length); } else if (Regex.simpleMatch("DELETE /container/*", request)) { Streams.readFully(exchange.getRequestBody()); - blobs.entrySet().removeIf(blob -> blob.getKey().startsWith(exchange.getRequestURI().toString())); + blobs.entrySet().removeIf(blob -> blob.getKey().startsWith(exchange.getRequestURI().getPath())); exchange.sendResponseHeaders(RestStatus.ACCEPTED.getStatus(), -1); } else if (Regex.simpleMatch("GET /container?restype=container&comp=list*", request)) { @@ -187,4 +235,27 @@ public class AzureBlobStoreRepositoryTests extends ESBlobStoreRepositoryIntegTes } } } + + /** + * HTTP handler that injects random Azure service errors + * + * Note: it is not a good idea to allow this handler to simulate too many errors as it would + * slow down the test suite. + */ + @SuppressForbidden(reason = "this test uses a HttpServer to emulate an Azure endpoint") + private static class AzureErroneousHttpHandler extends ErroneousHttpHandler { + + AzureErroneousHttpHandler(final HttpHandler delegate, final int maxErrorsPerRequest) { + super(delegate, maxErrorsPerRequest); + } + + @Override + protected String requestUniqueId(final HttpExchange exchange) { + final String requestId = exchange.getRequestHeaders().getFirst(Constants.HeaderConstants.CLIENT_REQUEST_ID_HEADER); + final String range = exchange.getRequestHeaders().getFirst(Constants.HeaderConstants.STORAGE_RANGE_HEADER); + return exchange.getRequestMethod() + + " " + requestId + + (range != null ? 
" " + range : ""); + } + } } diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java index 1c5c2dd39fa..f791550adeb 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java @@ -19,15 +19,25 @@ package org.elasticsearch.repositories.azure; +import com.microsoft.azure.storage.OperationContext; +import com.microsoft.azure.storage.StorageException; +import com.microsoft.azure.storage.blob.CloudBlobClient; +import com.microsoft.azure.storage.blob.CloudBlobContainer; +import org.elasticsearch.action.ActionRunnable; +import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.MockSecureSettings; import org.elasticsearch.common.settings.SecureSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.repositories.AbstractThirdPartyRepositoryTestCase; +import org.elasticsearch.repositories.blobstore.BlobStoreRepository; +import java.net.HttpURLConnection; import java.util.Collection; +import java.util.function.Supplier; import static org.hamcrest.Matchers.blankOrNullString; import static org.hamcrest.Matchers.equalTo; @@ -71,5 +81,32 @@ public class AzureStorageCleanupThirdPartyTests extends AbstractThirdPartyReposi .put("base_path", System.getProperty("test.azure.base")) ).get(); assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); + if (Strings.hasText(System.getProperty("test.azure.sas_token"))) { + ensureSasTokenPermissions(); + } + } + + private void ensureSasTokenPermissions() { + final BlobStoreRepository repository = getRepository(); + final PlainActionFuture future = PlainActionFuture.newFuture(); + repository.threadPool().generic().execute(ActionRunnable.wrap(future, l -> { + final AzureBlobStore blobStore = (AzureBlobStore) repository.blobStore(); + final String account = "default"; + final Tuple> client = blobStore.getService().client(account); + final CloudBlobContainer blobContainer = client.v1().getContainerReference(blobStore.toString()); + try { + SocketAccess.doPrivilegedException(() -> blobContainer.exists(null, null, client.v2().get())); + future.onFailure(new RuntimeException( + "The SAS token used in this test allowed for checking container existence. 
This test only supports tokens " + + "that grant only the documented permission requirements for the Azure repository plugin.")); + } catch (StorageException e) { + if (e.getHttpStatusCode() == HttpURLConnection.HTTP_FORBIDDEN) { + future.onResponse(null); + } else { + future.onFailure(e); + } + } + })); + future.actionGet(); } } diff --git a/plugins/repository-gcs/licenses/httpclient-4.5.10.jar.sha1 b/plugins/repository-gcs/licenses/httpclient-4.5.10.jar.sha1 new file mode 100644 index 00000000000..b708efd0dd5 --- /dev/null +++ b/plugins/repository-gcs/licenses/httpclient-4.5.10.jar.sha1 @@ -0,0 +1 @@ +7ca2e4276f4ef95e4db725a8cd4a1d1e7585b9e5 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/httpclient-4.5.8.jar.sha1 b/plugins/repository-gcs/licenses/httpclient-4.5.8.jar.sha1 deleted file mode 100644 index 73f0d30c709..00000000000 --- a/plugins/repository-gcs/licenses/httpclient-4.5.8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c27c9d6f15435dc2b6947112027b418b0eef32b9 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/httpcore-4.4.11.jar.sha1 b/plugins/repository-gcs/licenses/httpcore-4.4.11.jar.sha1 deleted file mode 100644 index 6d64372bfcc..00000000000 --- a/plugins/repository-gcs/licenses/httpcore-4.4.11.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -de748cf874e4e193b42eceea9fe5574fabb9d4df \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/httpcore-4.4.12.jar.sha1 b/plugins/repository-gcs/licenses/httpcore-4.4.12.jar.sha1 new file mode 100644 index 00000000000..3c046171b30 --- /dev/null +++ b/plugins/repository-gcs/licenses/httpcore-4.4.12.jar.sha1 @@ -0,0 +1 @@ +21ebaf6d532bc350ba95bd81938fa5f0e511c132 \ No newline at end of file diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java index 5586be349bc..520b5b798a6 100644 --- a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java +++ b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java @@ -31,6 +31,10 @@ import com.google.cloud.storage.Storage; import com.google.cloud.storage.Storage.BlobListOption; import com.google.cloud.storage.StorageBatch; import com.google.cloud.storage.StorageException; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobMetaData; @@ -60,16 +64,19 @@ import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; +import static java.net.HttpURLConnection.HTTP_GONE; import static java.net.HttpURLConnection.HTTP_NOT_FOUND; import static java.net.HttpURLConnection.HTTP_PRECON_FAILED; class GoogleCloudStorageBlobStore implements BlobStore { + private static final Logger logger = LogManager.getLogger(GoogleCloudStorageBlobStore.class); + // The recommended maximum size of a blob that should be uploaded in a single // request. 
Larger files should be uploaded over multiple requests (this is // called "resumable upload") // https://cloud.google.com/storage/docs/json_api/v1/how-tos/resumable-upload - private static final int LARGE_BLOB_THRESHOLD_BYTE_SIZE = 5 * 1024 * 1024; + public static final int LARGE_BLOB_THRESHOLD_BYTE_SIZE = 5 * 1024 * 1024; private final String bucketName; private final String clientName; @@ -208,13 +215,18 @@ class GoogleCloudStorageBlobStore implements BlobStore { */ void writeBlob(String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists) throws IOException { final BlobInfo blobInfo = BlobInfo.newBuilder(bucketName, blobName).build(); - if (blobSize > LARGE_BLOB_THRESHOLD_BYTE_SIZE) { + if (blobSize > getLargeBlobThresholdInBytes()) { writeBlobResumable(blobInfo, inputStream, failIfAlreadyExists); } else { writeBlobMultipart(blobInfo, inputStream, blobSize, failIfAlreadyExists); } } + // non-static, package private for testing + long getLargeBlobThresholdInBytes() { + return LARGE_BLOB_THRESHOLD_BYTE_SIZE; + } + /** * Uploads a blob using the "resumable upload" method (multiple requests, which * can be independently retried in case of failure, see @@ -224,35 +236,53 @@ class GoogleCloudStorageBlobStore implements BlobStore { * @param failIfAlreadyExists whether to throw a FileAlreadyExistsException if the given blob already exists */ private void writeBlobResumable(BlobInfo blobInfo, InputStream inputStream, boolean failIfAlreadyExists) throws IOException { - try { - final Storage.BlobWriteOption[] writeOptions = failIfAlreadyExists ? - new Storage.BlobWriteOption[] { Storage.BlobWriteOption.doesNotExist() } : - new Storage.BlobWriteOption[0]; - final WriteChannel writeChannel = SocketAccess + // We retry 410 GONE errors to cover the unlikely but possible scenario where a resumable upload session becomes broken and + // needs to be restarted from scratch. Given how unlikely a 410 error should be according to SLAs we retry only twice. + assert inputStream.markSupported(); + inputStream.mark(Integer.MAX_VALUE); + StorageException storageException = null; + final Storage.BlobWriteOption[] writeOptions = failIfAlreadyExists ? 
+ new Storage.BlobWriteOption[]{Storage.BlobWriteOption.doesNotExist()} : new Storage.BlobWriteOption[0]; + for (int retry = 0; retry < 3; ++retry) { + try { + final WriteChannel writeChannel = SocketAccess .doPrivilegedIOException(() -> client().writer(blobInfo, writeOptions)); - Streams.copy(inputStream, Channels.newOutputStream(new WritableByteChannel() { - @Override - public boolean isOpen() { - return writeChannel.isOpen(); - } + Streams.copy(inputStream, Channels.newOutputStream(new WritableByteChannel() { + @Override + public boolean isOpen() { + return writeChannel.isOpen(); + } - @Override - public void close() throws IOException { - SocketAccess.doPrivilegedVoidIOException(writeChannel::close); - } + @Override + public void close() throws IOException { + SocketAccess.doPrivilegedVoidIOException(writeChannel::close); + } - @SuppressForbidden(reason = "Channel is based of a socket not a file") - @Override - public int write(ByteBuffer src) throws IOException { - return SocketAccess.doPrivilegedIOException(() -> writeChannel.write(src)); + @SuppressForbidden(reason = "Channel is based of a socket not a file") + @Override + public int write(ByteBuffer src) throws IOException { + return SocketAccess.doPrivilegedIOException(() -> writeChannel.write(src)); + } + })); + return; + } catch (final StorageException se) { + final int errorCode = se.getCode(); + if (errorCode == HTTP_GONE) { + logger.warn(() -> new ParameterizedMessage("Retrying broken resumable upload session for blob {}", blobInfo), se); + storageException = ExceptionsHelper.useOrSuppress(storageException, se); + inputStream.reset(); + continue; + } else if (failIfAlreadyExists && errorCode == HTTP_PRECON_FAILED) { + throw new FileAlreadyExistsException(blobInfo.getBlobId().getName(), null, se.getMessage()); } - })); - } catch (final StorageException se) { - if (failIfAlreadyExists && se.getCode() == HTTP_PRECON_FAILED) { - throw new FileAlreadyExistsException(blobInfo.getBlobId().getName(), null, se.getMessage()); + if (storageException != null) { + se.addSuppressed(storageException); + } + throw se; } - throw se; } + assert storageException != null; + throw storageException; } /** @@ -267,7 +297,7 @@ class GoogleCloudStorageBlobStore implements BlobStore { */ private void writeBlobMultipart(BlobInfo blobInfo, InputStream inputStream, long blobSize, boolean failIfAlreadyExists) throws IOException { - assert blobSize <= LARGE_BLOB_THRESHOLD_BYTE_SIZE : "large blob uploads should use the resumable upload method"; + assert blobSize <= getLargeBlobThresholdInBytes() : "large blob uploads should use the resumable upload method"; final ByteArrayOutputStream baos = new ByteArrayOutputStream(Math.toIntExact(blobSize)); Streams.copy(inputStream, baos); try { @@ -334,31 +364,36 @@ class GoogleCloudStorageBlobStore implements BlobStore { } final List blobIdsToDelete = blobNames.stream().map(blob -> BlobId.of(bucketName, blob)).collect(Collectors.toList()); final List failedBlobs = Collections.synchronizedList(new ArrayList<>()); - final StorageException e = SocketAccess.doPrivilegedIOException(() -> { - final AtomicReference ioe = new AtomicReference<>(); - final StorageBatch batch = client().batch(); - for (BlobId blob : blobIdsToDelete) { - batch.delete(blob).notify( - new BatchResult.Callback() { - @Override - public void success(Boolean result) { - } + try { + SocketAccess.doPrivilegedVoidIOException(() -> { + final AtomicReference ioe = new AtomicReference<>(); + final StorageBatch batch = client().batch(); + for (BlobId blob : 
blobIdsToDelete) { + batch.delete(blob).notify( + new BatchResult.Callback() { + @Override + public void success(Boolean result) { + } - @Override - public void error(StorageException exception) { - if (exception.getCode() != HTTP_NOT_FOUND) { - failedBlobs.add(blob); - if (ioe.compareAndSet(null, exception) == false) { - ioe.get().addSuppressed(exception); + @Override + public void error(StorageException exception) { + if (exception.getCode() != HTTP_NOT_FOUND) { + failedBlobs.add(blob); + if (ioe.compareAndSet(null, exception) == false) { + ioe.get().addSuppressed(exception); + } } } - } - }); - } - batch.submit(); - return ioe.get(); - }); - if (e != null) { + }); + } + batch.submit(); + + final StorageException exception = ioe.get(); + if (exception != null) { + throw exception; + } + }); + } catch (final Exception e) { throw new IOException("Exception when deleting blobs [" + failedBlobs + "]", e); } assert failedBlobs.isEmpty(); diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageService.java b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageService.java index 8adfaeb4273..501e6f4d086 100644 --- a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageService.java +++ b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageService.java @@ -100,7 +100,7 @@ public class GoogleCloudStorageService { * @return a new client storage instance that can be used to manage objects * (blobs) */ - private static Storage createClient(String clientName, GoogleCloudStorageClientSettings clientSettings) throws IOException { + private Storage createClient(String clientName, GoogleCloudStorageClientSettings clientSettings) throws IOException { logger.debug(() -> new ParameterizedMessage("creating GCS client with client_name [{}], endpoint [{}]", clientName, clientSettings.getHost())); final HttpTransport httpTransport = SocketAccess.doPrivilegedIOException(() -> { @@ -111,10 +111,16 @@ public class GoogleCloudStorageService { return builder.build(); }); final HttpTransportOptions httpTransportOptions = HttpTransportOptions.newBuilder() - .setConnectTimeout(toTimeout(clientSettings.getConnectTimeout())) - .setReadTimeout(toTimeout(clientSettings.getReadTimeout())) - .setHttpTransportFactory(() -> httpTransport) - .build(); + .setConnectTimeout(toTimeout(clientSettings.getConnectTimeout())) + .setReadTimeout(toTimeout(clientSettings.getReadTimeout())) + .setHttpTransportFactory(() -> httpTransport) + .build(); + final StorageOptions storageOptions = createStorageOptions(clientSettings, httpTransportOptions); + return storageOptions.getService(); + } + + StorageOptions createStorageOptions(final GoogleCloudStorageClientSettings clientSettings, + final HttpTransportOptions httpTransportOptions) { final StorageOptions.Builder storageOptionsBuilder = StorageOptions.newBuilder() .setTransportOptions(httpTransportOptions) .setHeaderProvider(() -> { @@ -144,7 +150,7 @@ public class GoogleCloudStorageService { } storageOptionsBuilder.setCredentials(serviceAccountCredentials); } - return storageOptionsBuilder.build().getService(); + return storageOptionsBuilder.build(); } /** diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreContainerTests.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreContainerTests.java index 
2f23011d4d9..cc3782cabac 100644 --- a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreContainerTests.java +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreContainerTests.java @@ -19,13 +19,36 @@ package org.elasticsearch.repositories.gcs; +import com.google.cloud.BatchResult; +import com.google.cloud.storage.BlobId; +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageBatch; +import com.google.cloud.storage.StorageBatchResult; +import com.google.cloud.storage.StorageException; +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.BytesRefBuilder; +import org.elasticsearch.common.blobstore.BlobContainer; +import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.BlobStore; +import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.repositories.ESBlobStoreContainerTestCase; +import java.io.IOException; +import java.io.InputStream; +import java.util.Arrays; +import java.util.List; import java.util.Locale; import java.util.concurrent.ConcurrentHashMap; +import static org.elasticsearch.repositories.ESBlobStoreTestCase.randomBytes; +import static org.hamcrest.Matchers.instanceOf; import static org.mockito.Matchers.any; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -37,10 +60,82 @@ public class GoogleCloudStorageBlobStoreContainerTests extends ESBlobStoreContai final String clientName = randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT); final GoogleCloudStorageService storageService = mock(GoogleCloudStorageService.class); try { - when(storageService.client(any(String.class))).thenReturn(new MockStorage(bucketName, new ConcurrentHashMap<>())); + when(storageService.client(any(String.class))).thenReturn(new MockStorage(bucketName, new ConcurrentHashMap<>(), random())); } catch (final Exception e) { throw new RuntimeException(e); } return new GoogleCloudStorageBlobStore(bucketName, clientName, storageService); } + + public void testWriteReadLarge() throws IOException { + try(BlobStore store = newBlobStore()) { + final BlobContainer container = store.blobContainer(new BlobPath()); + byte[] data = randomBytes(GoogleCloudStorageBlobStore.LARGE_BLOB_THRESHOLD_BYTE_SIZE + 1); + writeBlob(container, "foobar", new BytesArray(data), randomBoolean()); + if (randomBoolean()) { + // override file, to check if we get latest contents + random().nextBytes(data); + writeBlob(container, "foobar", new BytesArray(data), false); + } + try (InputStream stream = container.readBlob("foobar")) { + BytesRefBuilder target = new BytesRefBuilder(); + while (target.length() < data.length) { + byte[] buffer = new byte[scaledRandomIntBetween(1, data.length - target.length())]; + int offset = scaledRandomIntBetween(0, buffer.length - 1); + int read = stream.read(buffer, offset, buffer.length - offset); + target.append(new BytesRef(buffer, offset, read)); + } + assertEquals(data.length, target.length()); + assertArrayEquals(data, Arrays.copyOfRange(target.bytes(), 0, target.length())); + } + } + } + + @SuppressWarnings("unchecked") + public void testDeleteBlobsIgnoringIfNotExistsThrowsIOException() throws Exception 
{ + final List blobs = Arrays.asList("blobA", "blobB"); + + final StorageBatch batch = mock(StorageBatch.class); + if (randomBoolean()) { + StorageBatchResult result = mock(StorageBatchResult.class); + when(batch.delete(any(BlobId.class))).thenReturn(result); + doThrow(new StorageException(new IOException("Batch submit throws a storage exception"))).when(batch).submit(); + } else { + StorageBatchResult resultA = mock(StorageBatchResult.class); + doReturn(resultA).when(batch).delete(eq(BlobId.of("bucket", "blobA"))); + doAnswer(invocation -> { + StorageException storageException = new StorageException(new IOException("Batched delete throws a storage exception")); + ((BatchResult.Callback) invocation.getArguments()[0]).error(storageException); + return null; + }).when(resultA).notify(any(StorageBatchResult.Callback.class)); + + StorageBatchResult resultB = mock(StorageBatchResult.class); + doReturn(resultB).when(batch).delete(eq(BlobId.of("bucket", "blobB"))); + doAnswer(invocation -> { + if (randomBoolean()) { + StorageException storageException = new StorageException(new IOException("Batched delete throws a storage exception")); + ((BatchResult.Callback) invocation.getArguments()[0]).error(storageException); + } else { + ((BatchResult.Callback) invocation.getArguments()[0]).success(randomBoolean()); + } + return null; + }).when(resultB).notify(any(StorageBatchResult.Callback.class)); + + doNothing().when(batch).submit(); + } + + final Storage storage = mock(Storage.class); + when(storage.get("bucket")).thenReturn(mock(Bucket.class)); + when(storage.batch()).thenReturn(batch); + + final GoogleCloudStorageService storageService = mock(GoogleCloudStorageService.class); + when(storageService.client(any(String.class))).thenReturn(storage); + + try (BlobStore store = new GoogleCloudStorageBlobStore("bucket", "test", storageService)) { + final BlobContainer container = store.blobContainer(new BlobPath()); + + IOException e = expectThrows(IOException.class, () -> container.deleteBlobsIgnoringIfNotExists(blobs)); + assertThat(e.getCause(), instanceOf(StorageException.class)); + } + } } diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java index e0715850296..914746f7830 100644 --- a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java @@ -19,40 +19,40 @@ package org.elasticsearch.repositories.gcs; +import com.google.api.gax.retrying.RetrySettings; +import com.google.cloud.http.HttpTransportOptions; +import com.google.cloud.storage.StorageOptions; import com.sun.net.httpserver.HttpExchange; import com.sun.net.httpserver.HttpHandler; -import com.sun.net.httpserver.HttpServer; import org.apache.http.HttpStatus; +import org.apache.lucene.util.ArrayUtil; import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.Strings; import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.Streams; -import org.elasticsearch.common.network.InetAddresses; import org.elasticsearch.common.regex.Regex; import 
org.elasticsearch.common.settings.MockSecureSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.mocksocket.MockHttpServer; +import org.elasticsearch.env.Environment; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.repositories.blobstore.ESBlobStoreRepositoryIntegTestCase; +import org.elasticsearch.repositories.Repository; +import org.elasticsearch.repositories.blobstore.ESMockAPIBasedRepositoryIntegTestCase; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.rest.RestUtils; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.BeforeClass; +import org.elasticsearch.threadpool.ThreadPool; +import org.threeten.bp.Duration; import java.io.BufferedInputStream; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.UnsupportedEncodingException; -import java.net.InetAddress; -import java.net.InetSocketAddress; import java.net.URLDecoder; import java.security.KeyPairGenerator; import java.util.Arrays; @@ -80,36 +80,10 @@ import static org.elasticsearch.repositories.gcs.GoogleCloudStorageRepository.BU import static org.elasticsearch.repositories.gcs.GoogleCloudStorageRepository.CLIENT_NAME; @SuppressForbidden(reason = "this test uses a HttpServer to emulate a Google Cloud Storage endpoint") -public class GoogleCloudStorageBlobStoreRepositoryTests extends ESBlobStoreRepositoryIntegTestCase { +public class GoogleCloudStorageBlobStoreRepositoryTests extends ESMockAPIBasedRepositoryIntegTestCase { - private static HttpServer httpServer; private static byte[] serviceAccount; - @BeforeClass - public static void startHttpServer() throws Exception { - httpServer = MockHttpServer.createHttp(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0), 0); - httpServer.start(); - serviceAccount = createServiceAccount(); - } - - @Before - public void setUpHttpServer() { - httpServer.createContext("/", new InternalHttpHandler()); - httpServer.createContext("/token", new FakeOAuth2HttpHandler()); - } - - @AfterClass - public static void stopHttpServer() { - httpServer.stop(0); - httpServer = null; - } - - @After - public void tearDownHttpServer() { - httpServer.removeContext("/"); - httpServer.removeContext("/token"); - } - @Override protected String repositoryType() { return GoogleCloudStorageRepository.TYPE; @@ -126,23 +100,36 @@ public class GoogleCloudStorageBlobStoreRepositoryTests extends ESBlobStoreRepos @Override protected Collection> nodePlugins() { - return Collections.singletonList(GoogleCloudStoragePlugin.class); + return Collections.singletonList(TestGoogleCloudStoragePlugin.class); + } + + @Override + protected Map createHttpHandlers() { + final Map handlers = new HashMap<>(2); + handlers.put("/", new InternalHttpHandler()); + handlers.put("/token", new FakeOAuth2HttpHandler()); + return Collections.unmodifiableMap(handlers); + } + + @Override + protected HttpHandler createErroneousHttpHandler(final HttpHandler delegate) { + return new GoogleErroneousHttpHandler(delegate, randomIntBetween(2, 3)); } @Override protected Settings nodeSettings(int nodeOrdinal) { + if (serviceAccount == null) { + serviceAccount = createServiceAccount(); + } + final Settings.Builder settings = 
Settings.builder(); settings.put(super.nodeSettings(nodeOrdinal)); - - final InetSocketAddress address = httpServer.getAddress(); - final String endpoint = "http://" + InetAddresses.toUriString(address.getAddress()) + ":" + address.getPort(); - settings.put(ENDPOINT_SETTING.getConcreteSettingForNamespace("test").getKey(), endpoint); - settings.put(TOKEN_URI_SETTING.getConcreteSettingForNamespace("test").getKey(), endpoint + "/token"); + settings.put(ENDPOINT_SETTING.getConcreteSettingForNamespace("test").getKey(), httpServerUrl()); + settings.put(TOKEN_URI_SETTING.getConcreteSettingForNamespace("test").getKey(), httpServerUrl() + "/token"); final MockSecureSettings secureSettings = new MockSecureSettings(); secureSettings.setFile(CREDENTIALS_FILE_SETTING.getConcreteSettingForNamespace("test").getKey(), serviceAccount); settings.setSecureSettings(secureSettings); - return settings.build(); } @@ -184,36 +171,85 @@ public class GoogleCloudStorageBlobStoreRepositoryTests extends ESBlobStoreRepos assertEquals("failed to parse value [101mb] for setting [chunk_size], must be <= [100mb]", e.getMessage()); } - private static byte[] createServiceAccount() throws Exception { - final KeyPairGenerator keyPairGenerator = KeyPairGenerator.getInstance("RSA"); - keyPairGenerator.initialize(1024); - final String privateKey = Base64.getEncoder().encodeToString(keyPairGenerator.generateKeyPair().getPrivate().getEncoded()); + public static class TestGoogleCloudStoragePlugin extends GoogleCloudStoragePlugin { - final ByteArrayOutputStream out = new ByteArrayOutputStream(); - try (XContentBuilder builder = new XContentBuilder(XContentType.JSON.xContent(), out)) { - builder.startObject(); - { - builder.field("type", "service_account"); - builder.field("project_id", getTestClass().getName().toLowerCase(Locale.ROOT)); - builder.field("private_key_id", UUID.randomUUID().toString()); - builder.field("private_key", "-----BEGIN PRIVATE KEY-----\n" + privateKey + "\n-----END PRIVATE KEY-----\n"); - builder.field("client_email", "elastic@appspot.gserviceaccount.com"); - builder.field("client_id", String.valueOf(randomNonNegativeLong())); - } - builder.endObject(); + public TestGoogleCloudStoragePlugin(Settings settings) { + super(settings); + } + + @Override + protected GoogleCloudStorageService createStorageService() { + return new GoogleCloudStorageService() { + @Override + StorageOptions createStorageOptions(final GoogleCloudStorageClientSettings clientSettings, + final HttpTransportOptions httpTransportOptions) { + StorageOptions options = super.createStorageOptions(clientSettings, httpTransportOptions); + return options.toBuilder() + .setRetrySettings(RetrySettings.newBuilder() + .setTotalTimeout(options.getRetrySettings().getTotalTimeout()) + .setInitialRetryDelay(Duration.ofMillis(10L)) + .setRetryDelayMultiplier(options.getRetrySettings().getRetryDelayMultiplier()) + .setMaxRetryDelay(Duration.ofSeconds(1L)) + .setMaxAttempts(0) + .setJittered(false) + .setInitialRpcTimeout(options.getRetrySettings().getInitialRpcTimeout()) + .setRpcTimeoutMultiplier(options.getRetrySettings().getRpcTimeoutMultiplier()) + .setMaxRpcTimeout(options.getRetrySettings().getMaxRpcTimeout()) + .build()) + .build(); + } + }; + } + + @Override + public Map getRepositories(Environment env, NamedXContentRegistry registry, ThreadPool threadPool) { + return Collections.singletonMap(GoogleCloudStorageRepository.TYPE, + metadata -> new GoogleCloudStorageRepository(metadata, registry, this.storageService, threadPool) { + @Override + protected 
GoogleCloudStorageBlobStore createBlobStore() { + return new GoogleCloudStorageBlobStore("bucket", "test", storageService) { + @Override + long getLargeBlobThresholdInBytes() { + return ByteSizeUnit.MB.toBytes(1); + } + }; + } + }); + } + } + + private static byte[] createServiceAccount() { + try { + final KeyPairGenerator keyPairGenerator = KeyPairGenerator.getInstance("RSA"); + keyPairGenerator.initialize(1024); + final String privateKey = Base64.getEncoder().encodeToString(keyPairGenerator.generateKeyPair().getPrivate().getEncoded()); + + final ByteArrayOutputStream out = new ByteArrayOutputStream(); + try (XContentBuilder builder = new XContentBuilder(XContentType.JSON.xContent(), out)) { + builder.startObject(); + { + builder.field("type", "service_account"); + builder.field("project_id", getTestClass().getName().toLowerCase(Locale.ROOT)); + builder.field("private_key_id", UUID.randomUUID().toString()); + builder.field("private_key", "-----BEGIN PRIVATE KEY-----\n" + privateKey + "\n-----END PRIVATE KEY-----\n"); + builder.field("client_email", "elastic@appspot.gserviceaccount.com"); + builder.field("client_id", String.valueOf(randomNonNegativeLong())); + } + builder.endObject(); + } + return out.toByteArray(); + } catch (Exception e) { + throw new AssertionError("Unable to create service account file", e); } - return out.toByteArray(); } /** * Minimal HTTP handler that acts as a Google Cloud Storage compliant server - * - * Note: it does not support resumable uploads */ @SuppressForbidden(reason = "this test uses a HttpServer to emulate a Google Cloud Storage endpoint") private static class InternalHttpHandler implements HttpHandler { - private final ConcurrentMap blobs = new ConcurrentHashMap<>(); + private final ConcurrentMap blobs = new ConcurrentHashMap<>(); @Override public void handle(final HttpExchange exchange) throws IOException { @@ -224,13 +260,13 @@ public class GoogleCloudStorageBlobStoreRepositoryTests extends ESBlobStoreRepos RestUtils.decodeQueryString(exchange.getRequestURI().getQuery(), 0, params); final String prefix = params.get("prefix"); - final List> listOfBlobs = blobs.entrySet().stream() + final List> listOfBlobs = blobs.entrySet().stream() .filter(blob -> prefix == null || blob.getKey().startsWith(prefix)).collect(Collectors.toList()); final StringBuilder list = new StringBuilder(); list.append("{\"kind\":\"storage#objects\",\"items\":["); - for (Iterator> it = listOfBlobs.iterator(); it.hasNext(); ) { - Map.Entry blob = it.next(); + for (Iterator> it = listOfBlobs.iterator(); it.hasNext(); ) { + Map.Entry blob = it.next(); list.append("{\"kind\":\"storage#object\","); list.append("\"bucket\":\"bucket\","); list.append("\"name\":\"").append(blob.getKey()).append("\","); @@ -256,19 +292,24 @@ public class GoogleCloudStorageBlobStoreRepositoryTests extends ESBlobStoreRepos exchange.getResponseBody().write(response); } else if (Regex.simpleMatch("GET /download/storage/v1/b/bucket/o/*", request)) { - BytesReference blob = blobs.get(exchange.getRequestURI().getPath().replace("/download/storage/v1/b/bucket/o/", "")); + BytesArray blob = blobs.get(exchange.getRequestURI().getPath().replace("/download/storage/v1/b/bucket/o/", "")); if (blob != null) { + final String range = exchange.getRequestHeaders().getFirst("Range"); + Matcher matcher = Pattern.compile("bytes=([0-9]*)-([0-9]*)").matcher(range); + assert matcher.find(); + + byte[] response = Integer.parseInt(matcher.group(1)) == 0 ? 
blob.array() : new byte[0]; exchange.getResponseHeaders().add("Content-Type", "application/octet-stream"); - exchange.sendResponseHeaders(RestStatus.OK.getStatus(), blob.length()); - exchange.getResponseBody().write(blob.toBytesRef().bytes); + exchange.sendResponseHeaders(RestStatus.OK.getStatus(), response.length); + exchange.getResponseBody().write(response); } else { exchange.sendResponseHeaders(RestStatus.NOT_FOUND.getStatus(), -1); } } else if (Regex.simpleMatch("DELETE /storage/v1/b/bucket/o/*", request)) { int deletions = 0; - for (Iterator> iterator = blobs.entrySet().iterator(); iterator.hasNext(); ) { - Map.Entry blob = iterator.next(); + for (Iterator> iterator = blobs.entrySet().iterator(); iterator.hasNext(); ) { + Map.Entry blob = iterator.next(); if (blob.getKey().equals(exchange.getRequestURI().toString())) { iterator.remove(); deletions++; @@ -304,8 +345,8 @@ public class GoogleCloudStorageBlobStoreRepositoryTests extends ESBlobStoreRepos exchange.getResponseBody().write(response); } else if (Regex.simpleMatch("POST /upload/storage/v1/b/bucket/*uploadType=multipart*", request)) { - byte[] response = new byte[0]; try (BufferedInputStream in = new BufferedInputStream(new GZIPInputStream(exchange.getRequestBody()))) { + byte[] response = new byte[0]; String blob = null; int read; while ((read = in.read()) != -1) { @@ -353,16 +394,74 @@ public class GoogleCloudStorageBlobStoreRepositoryTests extends ESBlobStoreRepos byte[] tmp = binary.toByteArray(); // removes the trailing end "\r\n--__END_OF_PART__--\r\n" which is 23 bytes long blobs.put(blob, new BytesArray(Arrays.copyOf(tmp, tmp.length - 23))); + + exchange.getResponseHeaders().add("Content-Type", "application/json"); + exchange.sendResponseHeaders(RestStatus.OK.getStatus(), response.length); + exchange.getResponseBody().write(response); + } finally { blob = null; } } } } + + } else if (Regex.simpleMatch("POST /upload/storage/v1/b/bucket/*uploadType=resumable*", request)) { + final Map params = new HashMap<>(); + RestUtils.decodeQueryString(exchange.getRequestURI().getQuery(), 0, params); + final String blobName = params.get("name"); + blobs.put(blobName, BytesArray.EMPTY); + + byte[] response = Streams.readFully(exchange.getRequestBody()).utf8ToString().getBytes(UTF_8); exchange.getResponseHeaders().add("Content-Type", "application/json"); + exchange.getResponseHeaders().add("Location", httpServerUrl() + "/upload/storage/v1/b/bucket/o?" 
+ + "uploadType=resumable" + + "&upload_id=" + UUIDs.randomBase64UUID() + + "&test_blob_name=" + blobName); // not a Google Storage parameter, but it allows to pass the blob name exchange.sendResponseHeaders(RestStatus.OK.getStatus(), response.length); exchange.getResponseBody().write(response); + } else if (Regex.simpleMatch("PUT /upload/storage/v1/b/bucket/o?*uploadType=resumable*", request)) { + final Map params = new HashMap<>(); + RestUtils.decodeQueryString(exchange.getRequestURI().getQuery(), 0, params); + + final String blobName = params.get("test_blob_name"); + final String range = exchange.getRequestHeaders().getFirst("Content-Range"); + assert Strings.hasLength(range); + + Matcher matcher = Pattern.compile("bytes ([^/]*)/([0-9\\*]*)").matcher(range); + if (matcher.find()) { + String bytes = matcher.group(1); + String limit = matcher.group(2); + byte[] blob = blobs.get(blobName).array(); + assert blob != null; + // client is uploading a chunk + matcher = Pattern.compile("([0-9]*)-([0-9]*)").matcher(bytes); + assert matcher.find(); + + int end = Integer.parseInt(matcher.group(2)); + int start = Integer.parseInt(matcher.group(1)); + + final ByteArrayOutputStream out = new ByteArrayOutputStream(); + long count = Streams.copy(exchange.getRequestBody(), out); + int length = Math.max(end + 1, "*".equals(limit) ? 0 : Integer.parseInt(limit)); + assert count <= length; + if (length > blob.length) { + blob = ArrayUtil.growExact(blob, length); + } + assert blob.length >= end; + System.arraycopy(out.toByteArray(), 0, blob, start, Math.toIntExact(count)); + blobs.put(blobName, new BytesArray(blob)); + + if ("*".equals(limit)) { + exchange.getResponseHeaders().add("Range", String.format(Locale.ROOT, "bytes=%d/%d", start, end)); + exchange.getResponseHeaders().add("Content-Length", "0"); + exchange.sendResponseHeaders(308 /* Resume Incomplete */, -1); + } else { + assert blob.length == Integer.parseInt(limit); + exchange.sendResponseHeaders(RestStatus.OK.getStatus(), -1); + } + } } else { exchange.sendResponseHeaders(RestStatus.INTERNAL_SERVER_ERROR.getStatus(), -1); } @@ -383,4 +482,34 @@ public class GoogleCloudStorageBlobStoreRepositoryTests extends ESBlobStoreRepos exchange.close(); } } + + /** + * HTTP handler that injects random Google Cloud Storage service errors + * + * Note: it is not a good idea to allow this handler to simulate too many errors as it would + * slow down the test suite. + */ + @SuppressForbidden(reason = "this test uses a HttpServer to emulate a Google Cloud Storage endpoint") + private static class GoogleErroneousHttpHandler extends ErroneousHttpHandler { + + GoogleErroneousHttpHandler(final HttpHandler delegate, final int maxErrorsPerRequest) { + super(delegate, maxErrorsPerRequest); + } + + @Override + protected String requestUniqueId(HttpExchange exchange) { + final String range = exchange.getRequestHeaders().getFirst("Content-Range"); + return exchange.getRemoteAddress().toString() + + " " + exchange.getRequestMethod() + + " " + exchange.getRequestURI() + + (range != null ? 
" " + range : ""); + } + + @Override + protected boolean canFailRequest(final HttpExchange exchange) { + // Batch requests are not retried so we don't want to fail them + // The batched request are supposed to be retried (not tested here) + return exchange.getRequestURI().toString().startsWith("/batch/") == false; + } + } } diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreTests.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreTests.java index e2adfed94bb..294adfdaec5 100644 --- a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreTests.java +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreTests.java @@ -37,7 +37,7 @@ public class GoogleCloudStorageBlobStoreTests extends ESBlobStoreTestCase { final String clientName = randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT); final GoogleCloudStorageService storageService = mock(GoogleCloudStorageService.class); try { - when(storageService.client(any(String.class))).thenReturn(new MockStorage(bucketName, new ConcurrentHashMap<>())); + when(storageService.client(any(String.class))).thenReturn(new MockStorage(bucketName, new ConcurrentHashMap<>(), random())); } catch (final Exception e) { throw new RuntimeException(e); } diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/MockStorage.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/MockStorage.java index ca6ca60e41e..627bb8de943 100644 --- a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/MockStorage.java +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/MockStorage.java @@ -40,6 +40,7 @@ import com.google.cloud.storage.StorageException; import com.google.cloud.storage.StorageOptions; import com.google.cloud.storage.StorageRpcOptionUtils; import com.google.cloud.storage.StorageTestUtils; +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.core.internal.io.IOUtils; import org.mockito.stubbing.Answer; @@ -47,6 +48,7 @@ import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.InputStream; +import java.net.HttpURLConnection; import java.net.URL; import java.nio.ByteBuffer; import java.nio.channels.Channels; @@ -55,6 +57,8 @@ import java.nio.channels.WritableByteChannel; import java.util.ArrayList; import java.util.List; import java.util.Objects; +import java.util.Random; +import java.util.Set; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; @@ -71,10 +75,12 @@ import static org.mockito.Mockito.mock; */ class MockStorage implements Storage { + private final Random random; private final String bucketName; private final ConcurrentMap blobs; - MockStorage(final String bucket, final ConcurrentMap blobs) { + MockStorage(final String bucket, final ConcurrentMap blobs, final Random random) { + this.random = random; this.bucketName = Objects.requireNonNull(bucket); this.blobs = Objects.requireNonNull(blobs); } @@ -236,12 +242,16 @@ class MockStorage implements Storage { return null; } + private final Set simulated410s = ConcurrentCollections.newConcurrentSet(); + @Override public WriteChannel writer(BlobInfo blobInfo, BlobWriteOption... 
options) { if (bucketName.equals(blobInfo.getBucket())) { final ByteArrayOutputStream output = new ByteArrayOutputStream(); return new WriteChannel() { + private volatile boolean failed; + final WritableByteChannel writableByteChannel = Channels.newChannel(output); @Override @@ -256,6 +266,11 @@ class MockStorage implements Storage { @Override public int write(ByteBuffer src) throws IOException { + // Only fail a blob once on a 410 error since the error is so unlikely in practice + if (simulated410s.add(blobInfo) && random.nextBoolean()) { + failed = true; + throw new StorageException(HttpURLConnection.HTTP_GONE, "Simulated lost resumeable upload session"); + } return writableByteChannel.write(src); } @@ -267,13 +282,15 @@ class MockStorage implements Storage { @Override public void close() { IOUtils.closeWhileHandlingException(writableByteChannel); - if (Stream.of(options).anyMatch(option -> option.equals(BlobWriteOption.doesNotExist()))) { - byte[] existingBytes = blobs.putIfAbsent(blobInfo.getName(), output.toByteArray()); - if (existingBytes != null) { - throw new StorageException(412, "Blob already exists"); + if (failed == false) { + if (Stream.of(options).anyMatch(option -> option.equals(BlobWriteOption.doesNotExist()))) { + byte[] existingBytes = blobs.putIfAbsent(blobInfo.getName(), output.toByteArray()); + if (existingBytes != null) { + throw new StorageException(412, "Blob already exists"); + } + } else { + blobs.put(blobInfo.getName(), output.toByteArray()); } - } else { - blobs.put(blobInfo.getName(), output.toByteArray()); } } }; diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index acefe0eeb24..43b58ea7f39 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -75,6 +75,8 @@ normalization { runtimeClasspath { // ignore generated keytab files for the purposes of build avoidance ignore '*.keytab' + // ignore fixture ports file which is on the classpath primarily to pacify the security manager + ignore '*HdfsFixture/**' } } @@ -158,15 +160,14 @@ for (String integTestTaskName : ['integTestHa', 'integTestSecure', 'integTestSec .resolve("secureHaHdfsFixture") .resolve("ports") nonInputProperties.systemProperty "test.hdfs-fixture.ports", path - classpath += files(path) } else { Path path = buildDir.toPath() .resolve("fixtures") .resolve("haHdfsFixture") .resolve("ports") nonInputProperties.systemProperty "test.hdfs-fixture.ports", path - classpath += files(path) } + classpath += files("$buildDir/fixtures") } if (integTestTaskName.contains("Secure")) { diff --git a/plugins/repository-s3/licenses/httpclient-4.5.10.jar.sha1 b/plugins/repository-s3/licenses/httpclient-4.5.10.jar.sha1 new file mode 100644 index 00000000000..b708efd0dd5 --- /dev/null +++ b/plugins/repository-s3/licenses/httpclient-4.5.10.jar.sha1 @@ -0,0 +1 @@ +7ca2e4276f4ef95e4db725a8cd4a1d1e7585b9e5 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/httpclient-4.5.8.jar.sha1 b/plugins/repository-s3/licenses/httpclient-4.5.8.jar.sha1 deleted file mode 100644 index 73f0d30c709..00000000000 --- a/plugins/repository-s3/licenses/httpclient-4.5.8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c27c9d6f15435dc2b6947112027b418b0eef32b9 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/httpcore-4.4.11.jar.sha1 b/plugins/repository-s3/licenses/httpcore-4.4.11.jar.sha1 deleted file mode 100644 index 6d64372bfcc..00000000000 --- a/plugins/repository-s3/licenses/httpcore-4.4.11.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ 
-de748cf874e4e193b42eceea9fe5574fabb9d4df \ No newline at end of file diff --git a/plugins/repository-s3/licenses/httpcore-4.4.12.jar.sha1 b/plugins/repository-s3/licenses/httpcore-4.4.12.jar.sha1 new file mode 100644 index 00000000000..3c046171b30 --- /dev/null +++ b/plugins/repository-s3/licenses/httpcore-4.4.12.jar.sha1 @@ -0,0 +1 @@ +21ebaf6d532bc350ba95bd81938fa5f0e511c132 \ No newline at end of file diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java index 46910d840cd..f73bc7c8732 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java @@ -21,7 +21,6 @@ package org.elasticsearch.repositories.s3; import com.amazonaws.AmazonClientException; import com.amazonaws.services.s3.model.AbortMultipartUploadRequest; -import com.amazonaws.services.s3.model.AmazonS3Exception; import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest; import com.amazonaws.services.s3.model.DeleteObjectsRequest; import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest; @@ -31,7 +30,6 @@ import com.amazonaws.services.s3.model.ObjectListing; import com.amazonaws.services.s3.model.ObjectMetadata; import com.amazonaws.services.s3.model.PartETag; import com.amazonaws.services.s3.model.PutObjectRequest; -import com.amazonaws.services.s3.model.S3Object; import com.amazonaws.services.s3.model.UploadPartRequest; import com.amazonaws.services.s3.model.UploadPartResult; import org.apache.lucene.util.SetOnce; @@ -48,7 +46,6 @@ import org.elasticsearch.common.collect.Tuple; import java.io.IOException; import java.io.InputStream; -import java.nio.file.NoSuchFileException; import java.util.ArrayList; import java.util.HashSet; import java.util.List; @@ -81,18 +78,7 @@ class S3BlobContainer extends AbstractBlobContainer { @Override public InputStream readBlob(String blobName) throws IOException { - try (AmazonS3Reference clientReference = blobStore.clientReference()) { - final S3Object s3Object = SocketAccess.doPrivileged(() -> clientReference.client().getObject(blobStore.bucket(), - buildKey(blobName))); - return s3Object.getObjectContent(); - } catch (final AmazonClientException e) { - if (e instanceof AmazonS3Exception) { - if (404 == ((AmazonS3Exception) e).getStatusCode()) { - throw new NoSuchFileException("Blob object [" + blobName + "] not found: " + e.getMessage()); - } - } - throw e; - } + return new S3RetryingInputStream(blobStore, buildKey(blobName)); } /** @@ -102,7 +88,7 @@ class S3BlobContainer extends AbstractBlobContainer { public void writeBlob(String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists) throws IOException { assert inputStream.markSupported() : "No mark support on inputStream breaks the S3 SDK's ability to retry requests"; SocketAccess.doPrivilegedIOException(() -> { - if (blobSize <= blobStore.bufferSizeInBytes()) { + if (blobSize <= getLargeBlobThresholdInBytes()) { executeSingleUpload(blobStore, buildKey(blobName), inputStream, blobSize); } else { executeMultipartUpload(blobStore, buildKey(blobName), inputStream, blobSize); @@ -111,6 +97,11 @@ class S3BlobContainer extends AbstractBlobContainer { }); } + // package private for testing + long getLargeBlobThresholdInBytes() { + return blobStore.bufferSizeInBytes(); + } + @Override public void 
writeBlobAtomic(String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists) throws IOException { writeBlob(blobName, inputStream, blobSize, failIfAlreadyExists); @@ -347,15 +338,7 @@ class S3BlobContainer extends AbstractBlobContainer { final InputStream input, final long blobSize) throws IOException { - if (blobSize > MAX_FILE_SIZE_USING_MULTIPART.getBytes()) { - throw new IllegalArgumentException("Multipart upload request size [" + blobSize - + "] can't be larger than " + MAX_FILE_SIZE_USING_MULTIPART); - } - if (blobSize < MIN_PART_SIZE_USING_MULTIPART.getBytes()) { - throw new IllegalArgumentException("Multipart upload request size [" + blobSize - + "] can't be smaller than " + MIN_PART_SIZE_USING_MULTIPART); - } - + ensureMultiPartUploadSize(blobSize); final long partSize = blobStore.bufferSizeInBytes(); final Tuple<Long, Long> multiparts = numberOfMultiparts(blobSize, partSize); @@ -432,6 +415,18 @@ class S3BlobContainer extends AbstractBlobContainer { } } + // non-static, package private for testing + void ensureMultiPartUploadSize(final long blobSize) { + if (blobSize > MAX_FILE_SIZE_USING_MULTIPART.getBytes()) { + throw new IllegalArgumentException("Multipart upload request size [" + blobSize + + "] can't be larger than " + MAX_FILE_SIZE_USING_MULTIPART); + } + if (blobSize < MIN_PART_SIZE_USING_MULTIPART.getBytes()) { + throw new IllegalArgumentException("Multipart upload request size [" + blobSize + + "] can't be smaller than " + MIN_PART_SIZE_USING_MULTIPART); + } + } + /** * Returns the number of parts of size {@code partSize} needed to reach {@code totalSize}, * along with the size of the last (or unique) part. diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java index fcded005535..a8cb87a5526 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java @@ -68,6 +68,10 @@ class S3BlobStore implements BlobStore { return service.client(repositoryMetaData); } + int getMaxRetries() { + return service.settings(repositoryMetaData).maxRetries; + } + public String bucket() { return bucket; } diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java index 504ed32ff91..6a5847121fd 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java @@ -130,7 +130,7 @@ class S3Repository extends BlobStoreRepository { /** * Sets the S3 storage class type for the backup files. Values may be standard, reduced_redundancy, - * standard_ia and intelligent_tiering. Defaults to standard. + * standard_ia, onezone_ia and intelligent_tiering. Defaults to standard.
*/ static final Setting STORAGE_CLASS_SETTING = Setting.simpleString("storage_class"); diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RetryingInputStream.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RetryingInputStream.java new file mode 100644 index 00000000000..cb3a89316f6 --- /dev/null +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RetryingInputStream.java @@ -0,0 +1,159 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.repositories.s3; + +import com.amazonaws.AmazonClientException; +import com.amazonaws.services.s3.model.AmazonS3Exception; +import com.amazonaws.services.s3.model.GetObjectRequest; +import com.amazonaws.services.s3.model.S3Object; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.core.internal.io.IOUtils; +import org.elasticsearch.Version; + +import java.io.IOException; +import java.io.InputStream; +import java.nio.file.NoSuchFileException; +import java.util.ArrayList; +import java.util.List; + +/** + * Wrapper around an S3 object that will retry the {@link GetObjectRequest} if the download fails part-way through, resuming from where + * the failure occurred. This should be handled by the SDK but it isn't today. This should be revisited in the future (e.g. before removing + * the {@link Version#V_7_0_0} version constant) and removed when the SDK handles retries itself. 
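+ * + * A usage sketch, for illustration only: {@code S3BlobContainer#readBlob} in this change simply returns {@code new S3RetryingInputStream(blobStore, buildKey(blobName))}; when a read fails part-way through with an {@link IOException}, the stream re-opens the object with a range starting at the current offset so the caller keeps reading transparently, and once the retry budget is exhausted the earlier failures are attached as suppressed exceptions.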
+ * + * See https://github.com/aws/aws-sdk-java/issues/856 for the related SDK issue + */ +class S3RetryingInputStream extends InputStream { + + private static final Logger logger = LogManager.getLogger(S3RetryingInputStream.class); + + static final int MAX_SUPPRESSED_EXCEPTIONS = 10; + + private final S3BlobStore blobStore; + private final String blobKey; + private final int maxAttempts; + + private InputStream currentStream; + private int attempt = 1; + private List<IOException> failures = new ArrayList<>(MAX_SUPPRESSED_EXCEPTIONS); + private long currentOffset; + private boolean closed; + + S3RetryingInputStream(S3BlobStore blobStore, String blobKey) throws IOException { + this.blobStore = blobStore; + this.blobKey = blobKey; + this.maxAttempts = blobStore.getMaxRetries() + 1; + currentStream = openStream(); + } + + private InputStream openStream() throws IOException { + try (AmazonS3Reference clientReference = blobStore.clientReference()) { + final GetObjectRequest getObjectRequest = new GetObjectRequest(blobStore.bucket(), blobKey); + if (currentOffset > 0) { + getObjectRequest.setRange(currentOffset); + } + final S3Object s3Object = SocketAccess.doPrivileged(() -> clientReference.client().getObject(getObjectRequest)); + return s3Object.getObjectContent(); + } catch (final AmazonClientException e) { + if (e instanceof AmazonS3Exception) { + if (404 == ((AmazonS3Exception) e).getStatusCode()) { + throw addSuppressedExceptions(new NoSuchFileException("Blob object [" + blobKey + "] not found: " + e.getMessage())); + } + } + throw addSuppressedExceptions(e); + } + } + + @Override + public int read() throws IOException { + ensureOpen(); + while (true) { + try { + final int result = currentStream.read(); + currentOffset += 1; + return result; + } catch (IOException e) { + reopenStreamOrFail(e); + } + } + } + + @Override + public int read(byte[] b, int off, int len) throws IOException { + ensureOpen(); + while (true) { + try { + final int bytesRead = currentStream.read(b, off, len); + if (bytesRead == -1) { + return -1; + } + currentOffset += bytesRead; + return bytesRead; + } catch (IOException e) { + reopenStreamOrFail(e); + } + } + } + + private void ensureOpen() { + if (closed) { + assert false : "using S3RetryingInputStream after close"; + throw new IllegalStateException("using S3RetryingInputStream after close"); + } + } + + private void reopenStreamOrFail(IOException e) throws IOException { + if (attempt >= maxAttempts) { + throw addSuppressedExceptions(e); + } + logger.debug(new ParameterizedMessage("failed reading [{}/{}] at offset [{}], attempt [{}] of [{}], retrying", + blobStore.bucket(), blobKey, currentOffset, attempt, maxAttempts), e); + attempt += 1; + if (failures.size() < MAX_SUPPRESSED_EXCEPTIONS) { + failures.add(e); + } + IOUtils.closeWhileHandlingException(currentStream); + currentStream = openStream(); + } + + @Override + public void close() throws IOException { + currentStream.close(); + closed = true; + } + + @Override + public long skip(long n) { + throw new UnsupportedOperationException("S3RetryingInputStream does not support seeking"); + } + + @Override + public void reset() { + throw new UnsupportedOperationException("S3RetryingInputStream does not support seeking"); + } + + private <T extends Exception> T addSuppressedExceptions(T e) { + for (IOException failure : failures) { + e.addSuppressed(failure); + } + return e; + } +} diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java
b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java index 4acd7783b49..3ba8145f6a4 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java @@ -38,7 +38,6 @@ import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.settings.Settings; import java.io.Closeable; -import java.io.IOException; import java.util.Map; import static java.util.Collections.emptyMap; @@ -107,7 +106,7 @@ class S3Service implements Closeable { * @param repositoryMetaData Repository Metadata * @return S3ClientSettings */ - private S3ClientSettings settings(RepositoryMetaData repositoryMetaData) { + S3ClientSettings settings(RepositoryMetaData repositoryMetaData) { final String clientName = S3Repository.CLIENT_NAME.get(repositoryMetaData.settings()); final S3ClientSettings staticSettings = staticClientSettings.get(clientName); if (staticSettings != null) { @@ -229,7 +228,7 @@ class S3Service implements Closeable { } @Override - public void close() throws IOException { + public void close() { releaseCachedClients(); } diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java index 2c97ae2b5fa..7060082ffcd 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java @@ -21,8 +21,11 @@ package org.elasticsearch.repositories.s3; import com.amazonaws.SdkClientException; import com.amazonaws.services.s3.internal.MD5DigestCalculatingInputStream; import com.amazonaws.util.Base16; +import com.sun.net.httpserver.HttpExchange; import com.sun.net.httpserver.HttpServer; +import org.apache.http.ConnectionClosedException; import org.apache.http.HttpStatus; +import org.apache.http.NoHttpResponseException; import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.SuppressForbidden; @@ -51,12 +54,15 @@ import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.SocketTimeoutException; import java.nio.charset.StandardCharsets; +import java.nio.file.NoSuchFileException; import java.util.Arrays; import java.util.Locale; import java.util.Objects; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; +import java.util.regex.Matcher; +import java.util.regex.Pattern; import static org.elasticsearch.repositories.s3.S3ClientSettings.DISABLE_CHUNKED_ENCODING; import static org.elasticsearch.repositories.s3.S3ClientSettings.ENDPOINT_SETTING; @@ -67,6 +73,7 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.lessThan; /** * This class tests how a {@link S3BlobContainer} and its underlying AWS S3 client are retrying requests when reading or writing blobs. 
@@ -130,26 +137,41 @@ public class S3BlobContainerRetriesTests extends ESTestCase { repositoryMetaData)); } + public void testReadNonexistentBlobThrowsNoSuchFileException() { + final BlobContainer blobContainer = createBlobContainer(between(1, 5), null, null, null); + final Exception exception = expectThrows(NoSuchFileException.class, () -> blobContainer.readBlob("read_nonexistent_blob")); + assertThat(exception.getMessage().toLowerCase(Locale.ROOT), containsString("blob object [read_nonexistent_blob] not found")); + } + public void testReadBlobWithRetries() throws Exception { final int maxRetries = randomInt(5); final CountDown countDown = new CountDown(maxRetries + 1); - final byte[] bytes = randomByteArrayOfLength(randomIntBetween(1, 512)); + final byte[] bytes = randomBlobContent(); httpServer.createContext("/bucket/read_blob_max_retries", exchange -> { Streams.readFully(exchange.getRequestBody()); if (countDown.countDown()) { + final int rangeStart = getRangeStart(exchange); + assertThat(rangeStart, lessThan(bytes.length)); exchange.getResponseHeaders().add("Content-Type", "text/plain; charset=utf-8"); - exchange.sendResponseHeaders(HttpStatus.SC_OK, bytes.length); - exchange.getResponseBody().write(bytes); + exchange.sendResponseHeaders(HttpStatus.SC_OK, bytes.length - rangeStart); + exchange.getResponseBody().write(bytes, rangeStart, bytes.length - rangeStart); exchange.close(); return; } - exchange.sendResponseHeaders(randomFrom(HttpStatus.SC_INTERNAL_SERVER_ERROR, HttpStatus.SC_BAD_GATEWAY, - HttpStatus.SC_SERVICE_UNAVAILABLE, HttpStatus.SC_GATEWAY_TIMEOUT), -1); - exchange.close(); + if (randomBoolean()) { + exchange.sendResponseHeaders(randomFrom(HttpStatus.SC_INTERNAL_SERVER_ERROR, HttpStatus.SC_BAD_GATEWAY, + HttpStatus.SC_SERVICE_UNAVAILABLE, HttpStatus.SC_GATEWAY_TIMEOUT), -1); + } else if (randomBoolean()) { + sendIncompleteContent(exchange, bytes); + } + if (randomBoolean()) { + exchange.close(); + } }); - final BlobContainer blobContainer = createBlobContainer(maxRetries, null, null, null); + final TimeValue readTimeout = TimeValue.timeValueMillis(between(100, 500)); + final BlobContainer blobContainer = createBlobContainer(maxRetries, readTimeout, null, null); try (InputStream inputStream = blobContainer.readBlob("read_blob_max_retries")) { assertArrayEquals(bytes, BytesReference.toBytes(Streams.readFully(inputStream))); assertThat(countDown.isCountedDown(), is(true)); @@ -157,8 +179,9 @@ public class S3BlobContainerRetriesTests extends ESTestCase { } public void testReadBlobWithReadTimeouts() { - final TimeValue readTimeout = TimeValue.timeValueMillis(randomIntBetween(100, 500)); - final BlobContainer blobContainer = createBlobContainer(1, readTimeout, null, null); + final int maxRetries = randomInt(5); + final TimeValue readTimeout = TimeValue.timeValueMillis(between(100, 200)); + final BlobContainer blobContainer = createBlobContainer(maxRetries, readTimeout, null, null); // HTTP server does not send a response httpServer.createContext("/bucket/read_blob_unresponsive", exchange -> {}); @@ -168,15 +191,8 @@ public class S3BlobContainerRetriesTests extends ESTestCase { assertThat(exception.getCause(), instanceOf(SocketTimeoutException.class)); // HTTP server sends a partial response - final byte[] bytes = randomByteArrayOfLength(randomIntBetween(10, 128)); - httpServer.createContext("/bucket/read_blob_incomplete", exchange -> { - exchange.getResponseHeaders().add("Content-Type", "text/plain; charset=utf-8"); - exchange.sendResponseHeaders(HttpStatus.SC_OK, 
bytes.length); - exchange.getResponseBody().write(bytes, 0, randomIntBetween(1, bytes.length - 1)); - if (randomBoolean()) { - exchange.getResponseBody().flush(); - } - }); + final byte[] bytes = randomBlobContent(); + httpServer.createContext("/bucket/read_blob_incomplete", exchange -> sendIncompleteContent(exchange, bytes)); exception = expectThrows(SocketTimeoutException.class, () -> { try (InputStream stream = blobContainer.readBlob("read_blob_incomplete")) { @@ -184,13 +200,47 @@ public class S3BlobContainerRetriesTests extends ESTestCase { } }); assertThat(exception.getMessage().toLowerCase(Locale.ROOT), containsString("read timed out")); + assertThat(exception.getSuppressed().length, equalTo(maxRetries)); + } + + public void testReadBlobWithNoHttpResponse() { + final BlobContainer blobContainer = createBlobContainer(randomInt(5), null, null, null); + + // HTTP server closes connection immediately + httpServer.createContext("/bucket/read_blob_no_response", HttpExchange::close); + + Exception exception = expectThrows(SdkClientException.class, () -> blobContainer.readBlob("read_blob_no_response")); + assertThat(exception.getMessage().toLowerCase(Locale.ROOT), containsString("the target server failed to respond")); + assertThat(exception.getCause(), instanceOf(NoHttpResponseException.class)); + assertThat(exception.getSuppressed().length, equalTo(0)); + } + + public void testReadBlobWithPrematureConnectionClose() { + final int maxRetries = randomInt(20); + final BlobContainer blobContainer = createBlobContainer(maxRetries, null, null, null); + + // HTTP server sends a partial response + final byte[] bytes = randomBlobContent(); + httpServer.createContext("/bucket/read_blob_incomplete", exchange -> { + sendIncompleteContent(exchange, bytes); + exchange.close(); + }); + + final Exception exception = expectThrows(ConnectionClosedException.class, () -> { + try (InputStream stream = blobContainer.readBlob("read_blob_incomplete")) { + Streams.readFully(stream); + } + }); + assertThat(exception.getMessage().toLowerCase(Locale.ROOT), + containsString("premature end of content-length delimited message body")); + assertThat(exception.getSuppressed().length, equalTo(Math.min(S3RetryingInputStream.MAX_SUPPRESSED_EXCEPTIONS, maxRetries))); } public void testWriteBlobWithRetries() throws Exception { final int maxRetries = randomInt(5); final CountDown countDown = new CountDown(maxRetries + 1); - final byte[] bytes = randomByteArrayOfLength(randomIntBetween(1, frequently() ? 512 : 1 << 20)); // rarely up to 1mb + final byte[] bytes = randomBlobContent(); httpServer.createContext("/bucket/write_blob_max_retries", exchange -> { if ("PUT".equals(exchange.getRequestMethod()) && exchange.getRequestURI().getQuery() == null) { if (countDown.countDown()) { @@ -343,6 +393,35 @@ public class S3BlobContainerRetriesTests extends ESTestCase { assertThat(countDownComplete.isCountedDown(), is(true)); } + private static byte[] randomBlobContent() { + return randomByteArrayOfLength(randomIntBetween(1, frequently() ? 
512 : 1 << 20)); // rarely up to 1mb + } + + private static int getRangeStart(HttpExchange exchange) { + final String rangeHeader = exchange.getRequestHeaders().getFirst("Range"); + if (rangeHeader == null) { + return 0; + } + + final Matcher matcher = Pattern.compile("^bytes=([0-9]+)-9223372036854775806$").matcher(rangeHeader); + assertTrue(rangeHeader + " matches expected pattern", matcher.matches()); + return Math.toIntExact(Long.parseLong(matcher.group(1))); + } + + private static void sendIncompleteContent(HttpExchange exchange, byte[] bytes) throws IOException { + final int rangeStart = getRangeStart(exchange); + assertThat(rangeStart, lessThan(bytes.length)); + exchange.getResponseHeaders().add("Content-Type", "text/plain; charset=utf-8"); + exchange.sendResponseHeaders(HttpStatus.SC_OK, bytes.length - rangeStart); + final int bytesToSend = randomIntBetween(0, bytes.length - rangeStart - 1); + if (bytesToSend > 0) { + exchange.getResponseBody().write(bytes, rangeStart, bytesToSend); + } + if (randomBoolean()) { + exchange.getResponseBody().flush(); + } + } + /** * A resettable InputStream that only serves zeros. **/ @@ -413,7 +492,7 @@ public class S3BlobContainerRetriesTests extends ESTestCase { } @Override - public void close() throws IOException { + public void close() { closed.set(true); } diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java index ff35e55fd64..5762e34a19c 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java @@ -19,34 +19,35 @@ package org.elasticsearch.repositories.s3; import com.amazonaws.http.AmazonHttpClient; -import com.amazonaws.services.s3.Headers; +import com.amazonaws.services.s3.internal.MD5DigestCalculatingInputStream; +import com.amazonaws.util.Base16; import com.sun.net.httpserver.HttpExchange; import com.sun.net.httpserver.HttpHandler; -import com.sun.net.httpserver.HttpServer; -import org.apache.http.HttpStatus; -import org.elasticsearch.common.Strings; +import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.UUIDs; +import org.elasticsearch.common.blobstore.BlobContainer; +import org.elasticsearch.common.blobstore.BlobPath; +import org.elasticsearch.common.blobstore.BlobStore; +import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.Streams; -import org.elasticsearch.common.network.InetAddresses; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.MockSecureSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.mocksocket.MockHttpServer; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.repositories.blobstore.ESBlobStoreRepositoryIntegTestCase; +import org.elasticsearch.repositories.blobstore.ESMockAPIBasedRepositoryIntegTestCase; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.rest.RestUtils; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Before; -import 
org.junit.BeforeClass; +import org.elasticsearch.snapshots.mockstore.BlobStoreWrapper; +import org.elasticsearch.threadpool.ThreadPool; +import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.InputStreamReader; -import java.net.InetAddress; -import java.net.InetSocketAddress; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Collection; @@ -57,41 +58,12 @@ import java.util.List; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.atomic.AtomicInteger; import static java.nio.charset.StandardCharsets.UTF_8; import static org.hamcrest.Matchers.nullValue; @SuppressForbidden(reason = "this test uses a HttpServer to emulate an S3 endpoint") -public class S3BlobStoreRepositoryTests extends ESBlobStoreRepositoryIntegTestCase { - - private static HttpServer httpServer; - - @BeforeClass - public static void startHttpServer() throws Exception { - httpServer = MockHttpServer.createHttp(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0), 0); - httpServer.start(); - } - - @Before - public void setUpHttpServer() { - HttpHandler handler = new InternalHttpHandler(); - if (randomBoolean()) { - handler = new ErroneousHttpHandler(handler, randomIntBetween(2, 3)); - } - httpServer.createContext("/bucket", handler); - } - - @AfterClass - public static void stopHttpServer() { - httpServer.stop(0); - httpServer = null; - } - - @After - public void tearDownHttpServer() { - httpServer.removeContext("/bucket"); - } +public class S3BlobStoreRepositoryTests extends ESMockAPIBasedRepositoryIntegTestCase { @Override protected String repositoryType() { @@ -101,6 +73,7 @@ public class S3BlobStoreRepositoryTests extends ESBlobStoreRepositoryIntegTestCa @Override protected Settings repositorySettings() { return Settings.builder() + .put(super.repositorySettings()) .put(S3Repository.BUCKET_SETTING.getKey(), "bucket") .put(S3Repository.CLIENT_NAME.getKey(), "test") .build(); @@ -111,17 +84,24 @@ public class S3BlobStoreRepositoryTests extends ESBlobStoreRepositoryIntegTestCa return Collections.singletonList(TestS3RepositoryPlugin.class); } + @Override + protected Map createHttpHandlers() { + return Collections.singletonMap("/bucket", new InternalHttpHandler()); + } + + @Override + protected HttpHandler createErroneousHttpHandler(final HttpHandler delegate) { + return new S3ErroneousHttpHandler(delegate, randomIntBetween(2, 3)); + } + @Override protected Settings nodeSettings(int nodeOrdinal) { final MockSecureSettings secureSettings = new MockSecureSettings(); secureSettings.setString(S3ClientSettings.ACCESS_KEY_SETTING.getConcreteSettingForNamespace("test").getKey(), "access"); secureSettings.setString(S3ClientSettings.SECRET_KEY_SETTING.getConcreteSettingForNamespace("test").getKey(), "secret"); - final InetSocketAddress address = httpServer.getAddress(); - final String endpoint = "http://" + InetAddresses.toUriString(address.getAddress()) + ":" + address.getPort(); - return Settings.builder() - .put(S3ClientSettings.ENDPOINT_SETTING.getConcreteSettingForNamespace("test").getKey(), endpoint) + .put(S3ClientSettings.ENDPOINT_SETTING.getConcreteSettingForNamespace("test").getKey(), httpServerUrl()) // Disable chunked encoding as it simplifies a lot the request parsing on the httpServer side .put(S3ClientSettings.DISABLE_CHUNKED_ENCODING.getConcreteSettingForNamespace("test").getKey(), true) // Disable request throttling because some random values in tests might 
generate too many failures for the S3 client @@ -131,6 +111,9 @@ public class S3BlobStoreRepositoryTests extends ESBlobStoreRepositoryIntegTestCa .build(); } + /** + * S3RepositoryPlugin that allows disabling chunked encoding and setting a low threshold between single upload and multipart upload. + */ public static class TestS3RepositoryPlugin extends S3RepositoryPlugin { public TestS3RepositoryPlugin(final Settings settings) { @@ -143,6 +126,31 @@ public class S3BlobStoreRepositoryTests extends ESBlobStoreRepositoryIntegTestCa settings.add(S3ClientSettings.DISABLE_CHUNKED_ENCODING); return settings; } + + @Override + protected S3Repository createRepository(RepositoryMetaData metadata, NamedXContentRegistry registry, ThreadPool threadPool) { + return new S3Repository(metadata, registry, service, threadPool) { + + @Override + public BlobStore blobStore() { + return new BlobStoreWrapper(super.blobStore()) { + @Override + public BlobContainer blobContainer(final BlobPath path) { + return new S3BlobContainer(path, (S3BlobStore) delegate()) { + @Override + long getLargeBlobThresholdInBytes() { + return ByteSizeUnit.MB.toBytes(1L); + } + + @Override + void ensureMultiPartUploadSize(long blobSize) { + } + }; + } + }; + } + }; + } } /** @@ -157,7 +165,65 @@ public class S3BlobStoreRepositoryTests extends ESBlobStoreRepositoryIntegTestCa public void handle(final HttpExchange exchange) throws IOException { final String request = exchange.getRequestMethod() + " " + exchange.getRequestURI().toString(); try { - if (Regex.simpleMatch("PUT /bucket/*", request)) { + if (Regex.simpleMatch("POST /bucket/*?uploads", request)) { + final String uploadId = UUIDs.randomBase64UUID(); + byte[] response = ("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n" + + "<InitiateMultipartUploadResult>\n" + + " <Bucket>bucket</Bucket>\n" + + " <Key>" + exchange.getRequestURI().getPath() + "</Key>\n" + + " <UploadId>" + uploadId + "</UploadId>\n" + + "</InitiateMultipartUploadResult>").getBytes(StandardCharsets.UTF_8); + blobs.put(multipartKey(uploadId, 0), BytesArray.EMPTY); + exchange.getResponseHeaders().add("Content-Type", "application/xml"); + exchange.sendResponseHeaders(RestStatus.OK.getStatus(), response.length); + exchange.getResponseBody().write(response); + + } else if (Regex.simpleMatch("PUT /bucket/*?uploadId=*&partNumber=*", request)) { + final Map<String, String> params = new HashMap<>(); + RestUtils.decodeQueryString(exchange.getRequestURI().getQuery(), 0, params); + + final String uploadId = params.get("uploadId"); + if (blobs.containsKey(multipartKey(uploadId, 0))) { + final int partNumber = Integer.parseInt(params.get("partNumber")); + MD5DigestCalculatingInputStream md5 = new MD5DigestCalculatingInputStream(exchange.getRequestBody()); + blobs.put(multipartKey(uploadId, partNumber), Streams.readFully(md5)); + exchange.getResponseHeaders().add("ETag", Base16.encodeAsString(md5.getMd5Digest())); + exchange.sendResponseHeaders(RestStatus.OK.getStatus(), -1); + } else { + exchange.sendResponseHeaders(RestStatus.NOT_FOUND.getStatus(), -1); + } + + } else if (Regex.simpleMatch("POST /bucket/*?uploadId=*", request)) { + Streams.readFully(exchange.getRequestBody()); + final Map<String, String> params = new HashMap<>(); + RestUtils.decodeQueryString(exchange.getRequestURI().getQuery(), 0, params); + final String uploadId = params.get("uploadId"); + + final int nbParts = blobs.keySet().stream() + .filter(blobName -> blobName.startsWith(uploadId)) + .map(blobName -> blobName.replaceFirst(uploadId + '\n', "")) + .mapToInt(Integer::parseInt) + .max() + .orElse(0); + + final ByteArrayOutputStream blob = new ByteArrayOutputStream(); + for (int partNumber = 0; partNumber <= nbParts; partNumber++) {
BytesReference part = blobs.remove(multipartKey(uploadId, partNumber)); + assertNotNull(part); + part.writeTo(blob); + } + blobs.put(exchange.getRequestURI().getPath(), new BytesArray(blob.toByteArray())); + + byte[] response = ("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n" + + "<CompleteMultipartUploadResult>\n" + + " <Bucket>bucket</Bucket>\n" + + " <Key>" + exchange.getRequestURI().getPath() + "</Key>\n" + + "</CompleteMultipartUploadResult>").getBytes(StandardCharsets.UTF_8); + exchange.getResponseHeaders().add("Content-Type", "application/xml"); + exchange.sendResponseHeaders(RestStatus.OK.getStatus(), response.length); + exchange.getResponseBody().write(response); + + } else if (Regex.simpleMatch("PUT /bucket/*", request)) { blobs.put(exchange.getRequestURI().toString(), Streams.readFully(exchange.getRequestBody())); exchange.sendResponseHeaders(RestStatus.OK.getStatus(), -1); @@ -237,51 +303,29 @@ public class S3BlobStoreRepositoryTests extends ESBlobStoreRepositoryIntegTestCa exchange.close(); } } + + private static String multipartKey(final String uploadId, int partNumber) { + return uploadId + "\n" + partNumber; + } } /** * HTTP handler that injects random S3 service errors * * Note: it is not a good idea to allow this handler to simulate too many errors as it would - * slow down the test suite and/or could trigger SDK client request throttling (and request - * would fail before reaching the max retry attempts - this can be mitigated by disabling - * {@link S3ClientSettings#USE_THROTTLE_RETRIES_SETTING}) + * slow down the test suite. */ @SuppressForbidden(reason = "this test uses a HttpServer to emulate an S3 endpoint") - private static class ErroneousHttpHandler implements HttpHandler { + private static class S3ErroneousHttpHandler extends ErroneousHttpHandler { - // first key is the remote address, second key is the HTTP request unique id provided by the AWS SDK client, - // value is the number of times the request has been seen - private final Map requests; - private final HttpHandler delegate; - private final int maxErrorsPerRequest; - - private ErroneousHttpHandler(final HttpHandler delegate, final int maxErrorsPerRequest) { - this.requests = new ConcurrentHashMap<>(); - this.delegate = delegate; - this.maxErrorsPerRequest = maxErrorsPerRequest; - assert maxErrorsPerRequest > 1; + S3ErroneousHttpHandler(final HttpHandler delegate, final int maxErrorsPerRequest) { + super(delegate, maxErrorsPerRequest); } @Override - public void handle(final HttpExchange exchange) throws IOException { - final String requestId = exchange.getRequestHeaders().getFirst(AmazonHttpClient.HEADER_SDK_TRANSACTION_ID); - assert Strings.hasText(requestId); - - final int count = requests.computeIfAbsent(requestId, req -> new AtomicInteger(0)).incrementAndGet(); - if (count >= maxErrorsPerRequest || randomBoolean()) { - requests.remove(requestId); - delegate.handle(exchange); - } else { - handleAsError(exchange, requestId); - } - } - - private void handleAsError(final HttpExchange exchange, final String requestId) throws IOException { - Streams.readFully(exchange.getRequestBody()); - exchange.getResponseHeaders().add(Headers.REQUEST_ID, requestId); - exchange.sendResponseHeaders(HttpStatus.SC_INTERNAL_SERVER_ERROR, -1); - exchange.close(); + protected String requestUniqueId(final HttpExchange exchange) { + // Amazon SDK client provides a unique ID per request + return exchange.getRequestHeaders().getFirst(AmazonHttpClient.HEADER_SDK_TRANSACTION_ID); } } } diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreTests.java
b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreTests.java index 10938504131..076ef4864a0 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreTests.java @@ -74,9 +74,10 @@ public class S3BlobStoreTests extends ESBlobStoreTestCase { assertThat(S3BlobStore.initStorageClass(null), equalTo(StorageClass.Standard)); assertThat(S3BlobStore.initStorageClass(""), equalTo(StorageClass.Standard)); - // it should accept [standard, standard_ia, reduced_redundancy, intelligent_tiering] + // it should accept [standard, standard_ia, onezone_ia, reduced_redundancy, intelligent_tiering] assertThat(S3BlobStore.initStorageClass("standard"), equalTo(StorageClass.Standard)); assertThat(S3BlobStore.initStorageClass("standard_ia"), equalTo(StorageClass.StandardInfrequentAccess)); + assertThat(S3BlobStore.initStorageClass("onezone_ia"), equalTo(StorageClass.OneZoneInfrequentAccess)); assertThat(S3BlobStore.initStorageClass("reduced_redundancy"), equalTo(StorageClass.ReducedRedundancy)); assertThat(S3BlobStore.initStorageClass("intelligent_tiering"), equalTo(StorageClass.IntelligentTiering)); } @@ -84,6 +85,7 @@ public class S3BlobStoreTests extends ESBlobStoreTestCase { public void testCaseInsensitiveStorageClass() { assertThat(S3BlobStore.initStorageClass("sTandaRd"), equalTo(StorageClass.Standard)); assertThat(S3BlobStore.initStorageClass("sTandaRd_Ia"), equalTo(StorageClass.StandardInfrequentAccess)); + assertThat(S3BlobStore.initStorageClass("oNeZoNe_iA"), equalTo(StorageClass.OneZoneInfrequentAccess)); assertThat(S3BlobStore.initStorageClass("reduCED_redundancy"), equalTo(StorageClass.ReducedRedundancy)); assertThat(S3BlobStore.initStorageClass("intelLigeNt_tieriNG"), equalTo(StorageClass.IntelligentTiering)); } diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java index 28fd9c72f08..1f0fcc2a97b 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java @@ -63,7 +63,15 @@ public class S3RepositoryThirdPartyTests extends AbstractThirdPartyRepositoryTes .put("base_path", System.getProperty("test.s3.base", "testpath")); final String endpoint = System.getProperty("test.s3.endpoint"); if (endpoint != null) { - settings = settings.put("endpoint", endpoint); + settings.put("endpoint", endpoint); + } else { + // only test different storage classes when running against the default endpoint, i.e. 
a genuine S3 service + if (randomBoolean()) { + final String storageClass + = randomFrom("standard", "reduced_redundancy", "standard_ia", "onezone_ia", "intelligent_tiering"); + logger.info("--> using storage_class [{}]", storageClass); + settings.put("storage_class", storageClass); + } } AcknowledgedResponse putRepositoryResponse = client().admin().cluster().preparePutRepository("test-repo") .setType("s3") diff --git a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java index a6023b00770..3264dfce9b3 100644 --- a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java @@ -737,6 +737,8 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase { ensureGreen(index); // Recovering a synced-flush index from 5.x to 6.x might be subtle as a 5.x index commit does not have all 6.x commit tags. if (randomBoolean()) { + // needs to call a replication action to sync the global checkpoint from primaries to replication. + assertOK(client().performRequest(new Request("POST", "/" + index + "/_refresh"))); // We have to spin synced-flush requests here because we fire the global checkpoint sync for the last write operation. // A synced-flush request considers the global checkpoint sync as an going operation because it acquires a shard permit. assertBusy(() -> { @@ -751,7 +753,10 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase { }); } else { // Explicitly flush so we're sure to have a bunch of documents in the Lucene index - assertOK(client().performRequest(new Request("POST", "/_flush"))); + Request flushRequest = new Request("POST", "/" + index + "/_flush"); + flushRequest.addParameter("force", "true"); + flushRequest.addParameter("wait_if_ongoing", "true"); + assertOK(client().performRequest(flushRequest)); } if (shouldHaveTranslog) { // Update a few documents so we are sure to have a translog diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/SearchRestCancellationIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/SearchRestCancellationIT.java new file mode 100644 index 00000000000..698f42c43ca --- /dev/null +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/SearchRestCancellationIT.java @@ -0,0 +1,251 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.http; + +import org.apache.logging.log4j.LogManager; +import org.apache.lucene.util.SetOnce; +import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; +import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; +import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; +import org.elasticsearch.action.bulk.BulkRequestBuilder; +import org.elasticsearch.action.search.SearchAction; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.client.Cancellable; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseListener; +import org.elasticsearch.common.Strings; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.script.MockScriptPlugin; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptType; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.lookup.LeafFieldsLookup; +import org.elasticsearch.tasks.CancellableTask; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.tasks.TaskInfo; +import org.elasticsearch.tasks.TaskManager; +import org.elasticsearch.transport.TransportService; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CancellationException; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Function; + +import static org.elasticsearch.index.query.QueryBuilders.scriptQuery; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.instanceOf; + +public class SearchRestCancellationIT extends HttpSmokeTestCase { + + @Override + protected Collection> nodePlugins() { + List> plugins = new ArrayList<>(); + plugins.add(ScriptedBlockPlugin.class); + plugins.addAll(super.nodePlugins()); + return plugins; + } + + public void testAutomaticCancellationDuringQueryPhase() throws Exception { + Map nodeIdToName = readNodesInfo(); + + List plugins = initBlockFactory(); + indexTestData(); + + Request searchRequest = new Request("GET", "/test/_search"); + SearchSourceBuilder searchSource = new SearchSourceBuilder().query(scriptQuery( + new Script(ScriptType.INLINE, "mockscript", ScriptedBlockPlugin.SCRIPT_NAME, Collections.emptyMap()))); + searchRequest.setJsonEntity(Strings.toString(searchSource)); + + CountDownLatch latch = new CountDownLatch(1); + AtomicReference error = new AtomicReference<>(); + Cancellable cancellable = getRestClient().performRequestAsync(searchRequest, new ResponseListener() { + @Override + public void onSuccess(Response response) { + latch.countDown(); + } + + @Override + public void onFailure(Exception exception) { + error.set(exception); + latch.countDown(); + } + }); + + awaitForBlock(plugins); + cancellable.cancel(); + ensureSearchTaskIsCancelled(nodeIdToName::get); + + disableBlocks(plugins); + latch.await(); + assertThat(error.get(), instanceOf(CancellationException.class)); + } + + public void testAutomaticCancellationDuringFetchPhase() 
throws Exception { + Map nodeIdToName = readNodesInfo(); + + List plugins = initBlockFactory(); + indexTestData(); + + Request searchRequest = new Request("GET", "/test/_search"); + SearchSourceBuilder searchSource = new SearchSourceBuilder().scriptField("test_field", + new Script(ScriptType.INLINE, "mockscript", ScriptedBlockPlugin.SCRIPT_NAME, Collections.emptyMap())); + searchRequest.setJsonEntity(Strings.toString(searchSource)); + + CountDownLatch latch = new CountDownLatch(1); + AtomicReference error = new AtomicReference<>(); + Cancellable cancellable = getRestClient().performRequestAsync(searchRequest, new ResponseListener() { + @Override + public void onSuccess(Response response) { + latch.countDown(); + } + + @Override + public void onFailure(Exception exception) { + error.set(exception); + latch.countDown(); + } + }); + + awaitForBlock(plugins); + cancellable.cancel(); + ensureSearchTaskIsCancelled(nodeIdToName::get); + + disableBlocks(plugins); + latch.await(); + assertThat(error.get(), instanceOf(CancellationException.class)); + } + + private static Map readNodesInfo() { + Map nodeIdToName = new HashMap<>(); + NodesInfoResponse nodesInfoResponse = client().admin().cluster().prepareNodesInfo().get(); + assertFalse(nodesInfoResponse.hasFailures()); + for (NodeInfo node : nodesInfoResponse.getNodes()) { + nodeIdToName.put(node.getNode().getId(), node.getNode().getName()); + } + return nodeIdToName; + } + + private static void ensureSearchTaskIsCancelled(Function nodeIdToName) throws Exception { + SetOnce searchTask = new SetOnce<>(); + ListTasksResponse listTasksResponse = client().admin().cluster().prepareListTasks().get(); + for (TaskInfo task : listTasksResponse.getTasks()) { + if (task.getAction().equals(SearchAction.NAME)) { + searchTask.set(task); + } + } + assertNotNull(searchTask.get()); + TaskId taskId = searchTask.get().getTaskId(); + String nodeName = nodeIdToName.apply(taskId.getNodeId()); + assertBusy(() -> { + TaskManager taskManager = internalCluster().getInstance(TransportService.class, nodeName).getTaskManager(); + Task task = taskManager.getTask(taskId.getId()); + assertThat(task, instanceOf(CancellableTask.class)); + assertTrue(((CancellableTask)task).isCancelled()); + }); + } + + private static void indexTestData() { + for (int i = 0; i < 5; i++) { + // Make sure we have a few segments + BulkRequestBuilder bulkRequestBuilder = client().prepareBulk().setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + for (int j = 0; j < 20; j++) { + bulkRequestBuilder.add(client().prepareIndex("test", "_doc", Integer.toString(i * 5 + j)).setSource("field", "value")); + } + assertNoFailures(bulkRequestBuilder.get()); + } + } + + private static List initBlockFactory() { + List plugins = new ArrayList<>(); + for (PluginsService pluginsService : internalCluster().getDataNodeInstances(PluginsService.class)) { + plugins.addAll(pluginsService.filterPlugins(ScriptedBlockPlugin.class)); + } + for (ScriptedBlockPlugin plugin : plugins) { + plugin.reset(); + plugin.enableBlock(); + } + return plugins; + } + + private void awaitForBlock(List plugins) throws Exception { + int numberOfShards = getNumShards("test").numPrimaries; + assertBusy(() -> { + int numberOfBlockedPlugins = 0; + for (ScriptedBlockPlugin plugin : plugins) { + numberOfBlockedPlugins += plugin.hits.get(); + } + logger.info("The plugin blocked on {} out of {} shards", numberOfBlockedPlugins, numberOfShards); + assertThat(numberOfBlockedPlugins, greaterThan(0)); + }, 10, TimeUnit.SECONDS); + } + + private static void 
disableBlocks(List plugins) { + for (ScriptedBlockPlugin plugin : plugins) { + plugin.disableBlock(); + } + } + + public static class ScriptedBlockPlugin extends MockScriptPlugin { + static final String SCRIPT_NAME = "search_block"; + + private final AtomicInteger hits = new AtomicInteger(); + + private final AtomicBoolean shouldBlock = new AtomicBoolean(true); + + void reset() { + hits.set(0); + } + + void disableBlock() { + shouldBlock.set(false); + } + + void enableBlock() { + shouldBlock.set(true); + } + + @Override + public Map, Object>> pluginScripts() { + return Collections.singletonMap(SCRIPT_NAME, params -> { + LeafFieldsLookup fieldsLookup = (LeafFieldsLookup) params.get("_fields"); + LogManager.getLogger(SearchRestCancellationIT.class).info("Blocking on the document {}", fieldsLookup.get("_id")); + hits.incrementAndGet(); + try { + awaitBusy(() -> shouldBlock.get() == false); + } catch (Exception e) { + throw new RuntimeException(e); + } + return true; + }); + } + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/clear_scroll.json b/rest-api-spec/src/main/resources/rest-api-spec/api/clear_scroll.json index fc83866f10e..f484c94246c 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/clear_scroll.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/clear_scroll.json @@ -1,7 +1,7 @@ { "clear_scroll":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/search-request-body.html#request-body-search-scroll", + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/search-request-body.html#_clear_scroll_api", "description":"Explicitly clears the search context for a scroll." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/exists_source.json b/rest-api-spec/src/main/resources/rest-api-spec/api/exists_source.json index 8008509b45b..143ee406025 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/exists_source.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/exists_source.json @@ -39,7 +39,6 @@ }, "type":{ "type":"string", - "required":false, "description":"The type of the document; deprecated and optional starting with 7.0", "deprecated":true } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/get_source.json b/rest-api-spec/src/main/resources/rest-api-spec/api/get_source.json index e8da013fd94..e5336059d39 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/get_source.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/get_source.json @@ -39,7 +39,6 @@ }, "type":{ "type":"string", - "required":false, "description":"The type of the document; deprecated and optional starting with 7.0", "deprecated":true } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/index.json b/rest-api-spec/src/main/resources/rest-api-spec/api/index.json index 6796c0bc3eb..c1f8c95d86d 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/index.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/index.json @@ -10,8 +10,8 @@ { "path":"/{index}/_doc/{id}", "methods":[ - "POST", - "PUT" + "PUT", + "POST" ], "parts":{ "id":{ @@ -60,8 +60,8 @@ { "path":"/{index}/{type}/{id}", "methods":[ - "POST", - "PUT" + "PUT", + "POST" ], "parts":{ "id":{ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.clone.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.clone.json index 9d4fb9ec46b..d3a249583bd 100644 --- 
a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.clone.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.clone.json @@ -16,12 +16,10 @@ "parts": { "index": { "type": "string", - "required": true, "description": "The name of the source index to clone" }, "target": { "type": "string", - "required": true, "description": "The name of the target index to clone into" } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.flush_synced.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.flush_synced.json index d30b1f6f541..e7c98d66451 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.flush_synced.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.flush_synced.json @@ -1,7 +1,7 @@ { "indices.flush_synced":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-flush.html#synced-flush-api", + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-synced-flush-api.html", "description":"Performs a synced flush operation on one or more indices." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/info.json b/rest-api-spec/src/main/resources/rest-api-spec/api/info.json index 3a4f4afa765..1c48f05d02e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/info.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/info.json @@ -1,7 +1,7 @@ { "info":{ "documentation":{ - "url":"https://www.elastic.co/guide/", + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html", "description":"Returns basic information about the cluster." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/mtermvectors.json b/rest-api-spec/src/main/resources/rest-api-spec/api/mtermvectors.json index 67f70f142d7..93dee177e80 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/mtermvectors.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/mtermvectors.json @@ -51,58 +51,48 @@ "params":{ "ids":{ "type":"list", - "description":"A comma-separated list of documents ids. You must define ids as parameter or set \"ids\" or \"docs\" in the request body", - "required":false + "description":"A comma-separated list of documents ids. You must define ids as parameter or set \"ids\" or \"docs\" in the request body" }, "term_statistics":{ "type":"boolean", "description":"Specifies if total term frequency and document frequency should be returned. Applies to all returned documents unless otherwise specified in body \"params\" or \"docs\".", - "default":false, - "required":false + "default":false }, "field_statistics":{ "type":"boolean", "description":"Specifies if document count, sum of document frequencies and sum of total term frequencies should be returned. Applies to all returned documents unless otherwise specified in body \"params\" or \"docs\".", - "default":true, - "required":false + "default":true }, "fields":{ "type":"list", - "description":"A comma-separated list of fields to return. Applies to all returned documents unless otherwise specified in body \"params\" or \"docs\".", - "required":false + "description":"A comma-separated list of fields to return. Applies to all returned documents unless otherwise specified in body \"params\" or \"docs\"." }, "offsets":{ "type":"boolean", "description":"Specifies if term offsets should be returned. 
Applies to all returned documents unless otherwise specified in body \"params\" or \"docs\".", - "default":true, - "required":false + "default":true }, "positions":{ "type":"boolean", "description":"Specifies if term positions should be returned. Applies to all returned documents unless otherwise specified in body \"params\" or \"docs\".", - "default":true, - "required":false + "default":true }, "payloads":{ "type":"boolean", "description":"Specifies if term payloads should be returned. Applies to all returned documents unless otherwise specified in body \"params\" or \"docs\".", - "default":true, - "required":false + "default":true }, "preference":{ "type":"string", - "description":"Specify the node or shard the operation should be performed on (default: random) .Applies to all returned documents unless otherwise specified in body \"params\" or \"docs\".", - "required":false + "description":"Specify the node or shard the operation should be performed on (default: random) .Applies to all returned documents unless otherwise specified in body \"params\" or \"docs\"." }, "routing":{ "type":"string", - "description":"Specific routing value. Applies to all returned documents unless otherwise specified in body \"params\" or \"docs\".", - "required":false + "description":"Specific routing value. Applies to all returned documents unless otherwise specified in body \"params\" or \"docs\"." }, "realtime":{ "type":"boolean", - "description":"Specifies if requests are real-time as opposed to near-real-time (default: true).", - "required":false + "description":"Specifies if requests are real-time as opposed to near-real-time (default: true)." }, "version":{ "type":"number", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ping.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ping.json index a77a90e31be..0e787e039d0 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/ping.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ping.json @@ -1,7 +1,7 @@ { "ping":{ "documentation":{ - "url":"https://www.elastic.co/guide/", + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html", "description":"Returns whether the cluster is running." }, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/render_search_template.json b/rest-api-spec/src/main/resources/rest-api-spec/api/render_search_template.json index 72e2b871fce..c2c474edd98 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/render_search_template.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/render_search_template.json @@ -1,7 +1,7 @@ { "render_search_template":{ "documentation":{ - "url":"http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/search-template.html", + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/search-template.html#_validating_templates", "description":"Allows to use the Mustache language to pre-render a search definition." 
}, "stability":"stable", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.cleanup_repository.json b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.cleanup_repository.json index 43c1687b8b5..1d216dcb0b3 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.cleanup_repository.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.cleanup_repository.json @@ -1,6 +1,9 @@ { "snapshot.cleanup_repository": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html", + "documentation": { + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html", + "description": "Removes stale data from repository." + }, "stability": "stable", "url": { "paths": [ @@ -12,7 +15,6 @@ "parts": { "repository": { "type": "string", - "required" : true, "description": "A repository name" } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/termvectors.json b/rest-api-spec/src/main/resources/rest-api-spec/api/termvectors.json index 112377ef5c7..dd7fac97d79 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/termvectors.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/termvectors.json @@ -87,52 +87,43 @@ "term_statistics":{ "type":"boolean", "description":"Specifies if total term frequency and document frequency should be returned.", - "default":false, - "required":false + "default":false }, "field_statistics":{ "type":"boolean", "description":"Specifies if document count, sum of document frequencies and sum of total term frequencies should be returned.", - "default":true, - "required":false + "default":true }, "fields":{ "type":"list", - "description":"A comma-separated list of fields to return.", - "required":false + "description":"A comma-separated list of fields to return." }, "offsets":{ "type":"boolean", "description":"Specifies if term offsets should be returned.", - "default":true, - "required":false + "default":true }, "positions":{ "type":"boolean", "description":"Specifies if term positions should be returned.", - "default":true, - "required":false + "default":true }, "payloads":{ "type":"boolean", "description":"Specifies if term payloads should be returned.", - "default":true, - "required":false + "default":true }, "preference":{ "type":"string", - "description":"Specify the node or shard the operation should be performed on (default: random).", - "required":false + "description":"Specify the node or shard the operation should be performed on (default: random)." }, "routing":{ "type":"string", - "description":"Specific routing value.", - "required":false + "description":"Specific routing value." }, "realtime":{ "type":"boolean", - "description":"Specifies if request is real-time as opposed to near-real-time (default: true).", - "required":false + "description":"Specifies if request is real-time as opposed to near-real-time (default: true)." 
}, "version":{ "type":"number", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/20_translog.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/20_translog.yml index 1444e6153fd..69acf85defa 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/20_translog.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/20_translog.yml @@ -9,6 +9,7 @@ - do: cluster.health: wait_for_no_initializing_shards: true + wait_for_events: languid - do: indices.stats: metric: [ translog ] @@ -78,6 +79,7 @@ - do: cluster.health: wait_for_no_initializing_shards: true + wait_for_events: languid - do: indices.stats: metric: [ translog ] @@ -142,6 +144,7 @@ - do: cluster.health: wait_for_no_initializing_shards: true + wait_for_events: languid - do: index: index: test @@ -194,6 +197,7 @@ - do: cluster.health: wait_for_no_initializing_shards: true + wait_for_events: languid - do: index: index: test diff --git a/server/src/main/java/org/elasticsearch/ElasticsearchException.java b/server/src/main/java/org/elasticsearch/ElasticsearchException.java index be8bf432633..12f45f71138 100644 --- a/server/src/main/java/org/elasticsearch/ElasticsearchException.java +++ b/server/src/main/java/org/elasticsearch/ElasticsearchException.java @@ -1027,7 +1027,12 @@ public class ElasticsearchException extends RuntimeException implements ToXConte org.elasticsearch.index.shard.ShardNotInPrimaryModeException.class, org.elasticsearch.index.shard.ShardNotInPrimaryModeException::new, 155, - Version.V_6_8_1); + Version.V_6_8_1), + RETENTION_LEASE_INVALID_RETAINING_SEQUENCE_NUMBER_EXCEPTION( + org.elasticsearch.index.seqno.RetentionLeaseInvalidRetainingSeqNoException.class, + org.elasticsearch.index.seqno.RetentionLeaseInvalidRetainingSeqNoException::new, + 156, + Version.V_7_5_0); final Class exceptionClass; final CheckedFunction constructor; diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index a2608c84e75..fd4afeaa5e5 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -118,6 +118,7 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_7_3_0 = new Version(7030099, org.apache.lucene.util.Version.LUCENE_8_1_0); public static final Version V_7_3_1 = new Version(7030199, org.apache.lucene.util.Version.LUCENE_8_1_0); public static final Version V_7_3_2 = new Version(7030299, org.apache.lucene.util.Version.LUCENE_8_1_0); + public static final Version V_7_3_3 = new Version(7030399, org.apache.lucene.util.Version.LUCENE_8_1_0); public static final Version V_7_4_0 = new Version(7040099, org.apache.lucene.util.Version.LUCENE_8_2_0); public static final Version V_7_5_0 = new Version(7050099, org.apache.lucene.util.Version.LUCENE_8_2_0); public static final Version CURRENT = V_7_5_0; diff --git a/server/src/main/java/org/elasticsearch/action/ActionListener.java b/server/src/main/java/org/elasticsearch/action/ActionListener.java index c21aa3b9d4b..6379d37d5cd 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionListener.java +++ b/server/src/main/java/org/elasticsearch/action/ActionListener.java @@ -22,6 +22,7 @@ package org.elasticsearch.action; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.CheckedConsumer; import org.elasticsearch.common.CheckedFunction; +import org.elasticsearch.common.CheckedRunnable; import 
org.elasticsearch.common.CheckedSupplier; import java.util.ArrayList; @@ -226,6 +227,37 @@ public interface ActionListener { }; } + /** + * Wraps a given listener and returns a new listener which executes the provided {@code runBefore} + * callback before the listener is notified via either {@code #onResponse} or {@code #onFailure}. + * If the callback throws an exception then it will be passed to the listener's {@code #onFailure} and its {@code #onResponse} will + * not be executed. + */ + static ActionListener runBefore(ActionListener delegate, CheckedRunnable runBefore) { + return new ActionListener() { + @Override + public void onResponse(Response response) { + try { + runBefore.run(); + } catch (Exception ex) { + delegate.onFailure(ex); + return; + } + delegate.onResponse(response); + } + + @Override + public void onFailure(Exception e) { + try { + runBefore.run(); + } catch (Exception ex) { + e.addSuppressed(ex); + } + delegate.onFailure(e); + } + }; + } + /** * Wraps a given listener and returns a new listener which makes sure {@link #onResponse(Object)} * and {@link #onFailure(Exception)} of the provided listener will be called at most once. diff --git a/server/src/main/java/org/elasticsearch/action/DocWriteResponse.java b/server/src/main/java/org/elasticsearch/action/DocWriteResponse.java index 129be6857af..544cbb02f79 100644 --- a/server/src/main/java/org/elasticsearch/action/DocWriteResponse.java +++ b/server/src/main/java/org/elasticsearch/action/DocWriteResponse.java @@ -41,6 +41,7 @@ import java.io.IOException; import java.io.UnsupportedEncodingException; import java.net.URLEncoder; import java.util.Locale; +import java.util.Objects; import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_PRIMARY_TERM; @@ -122,13 +123,13 @@ public abstract class DocWriteResponse extends ReplicationResponse implements Wr protected final Result result; public DocWriteResponse(ShardId shardId, String type, String id, long seqNo, long primaryTerm, long version, Result result) { - this.shardId = shardId; - this.type = type; - this.id = id; + this.shardId = Objects.requireNonNull(shardId); + this.type = Objects.requireNonNull(type); + this.id = Objects.requireNonNull(id); this.seqNo = seqNo; this.primaryTerm = primaryTerm; this.version = version; - this.result = result; + this.result = Objects.requireNonNull(result); } // needed for deserialization diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java index 6e8bc766485..28cb6a53df3 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java @@ -23,6 +23,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.util.SparseFixedBitSet; +import org.elasticsearch.Assertions; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.ResourceAlreadyExistsException; @@ -79,6 +80,7 @@ import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Objects; import java.util.Set; @@ -105,6 +107,7 @@ public class TransportBulkAction extends 
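
For illustration only, a minimal sketch of how the new ActionListener#runBefore helper might be used once this change is in, assuming the elasticsearch server artifact is on the classpath; the Releasable-wrapping helper and its name below are hypothetical and are not part of this diff.

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.lease.Releasables;

final class RunBeforeSketch {
    // Releases the given resource before the wrapped listener is notified; per the runBefore
    // contract above, if releasing throws, the exception goes to delegate.onFailure and
    // delegate.onResponse is skipped.
    static <T> ActionListener<T> releasingBefore(ActionListener<T> delegate, Releasable resource) {
        return ActionListener.runBefore(delegate, () -> Releasables.close(resource));
    }
}
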
HandledTransportAction indicesMetaData = metaData.indices(); for (DocWriteRequest actionRequest : bulkRequest.requests) { IndexRequest indexRequest = getIndexWriteRequest(actionRequest); + if (indexRequest != null) { - // get pipeline from request - String pipeline = indexRequest.getPipeline(); - if (pipeline == null) { - // start to look for default pipeline via settings found in the index meta data + if (indexRequest.isPipelineResolved() == false) { + final String requestPipeline = indexRequest.getPipeline(); + indexRequest.setPipeline(IngestService.NOOP_PIPELINE_NAME); + boolean requestCanOverridePipeline = true; + String requiredPipeline = null; + // start to look for default or required pipelines via settings found in the index meta data IndexMetaData indexMetaData = indicesMetaData.get(actionRequest.index()); // check the alias for the index request (this is how normal index requests are modeled) if (indexMetaData == null && indexRequest.index() != null) { @@ -186,34 +192,86 @@ public class TransportBulkAction extends HandledTransportAction templates = MetaDataIndexTemplateService.findTemplates(metaData, indexRequest.index()); assert (templates != null); - String defaultPipeline = IngestService.NOOP_PIPELINE_NAME; - // order of templates are highest order first, break if we find a default_pipeline + // order of templates are highest order first, we have to iterate through them all though + String defaultPipeline = null; for (IndexTemplateMetaData template : templates) { final Settings settings = template.settings(); - if (IndexSettings.DEFAULT_PIPELINE.exists(settings)) { + if (requiredPipeline == null && IndexSettings.REQUIRED_PIPELINE.exists(settings)) { + requiredPipeline = IndexSettings.REQUIRED_PIPELINE.get(settings); + requestCanOverridePipeline = false; + // we can not break in case a lower-order template has a default pipeline that we need to reject + } else if (defaultPipeline == null && IndexSettings.DEFAULT_PIPELINE.exists(settings)) { defaultPipeline = IndexSettings.DEFAULT_PIPELINE.get(settings); - break; + // we can not break in case a lower-order template has a required pipeline that we need to reject } } - indexRequest.setPipeline(defaultPipeline); - if (IngestService.NOOP_PIPELINE_NAME.equals(defaultPipeline) == false) { - hasIndexRequestsWithPipelines = true; + if (requiredPipeline != null && defaultPipeline != null) { + // we can not have picked up a required and a default pipeline from applying templates + final String message = String.format( + Locale.ROOT, + "required pipeline [%s] and default pipeline [%s] can not both be set", + requiredPipeline, + defaultPipeline); + throw new IllegalArgumentException(message); + } + final String pipeline; + if (requiredPipeline != null) { + pipeline = requiredPipeline; + } else { + pipeline = defaultPipeline != null ? 
defaultPipeline : IngestService.NOOP_PIPELINE_NAME; + } + indexRequest.setPipeline(pipeline); + } + + if (requestPipeline != null) { + if (requestCanOverridePipeline == false) { + final String message = String.format( + Locale.ROOT, + "request pipeline [%s] can not override required pipeline [%s]", + requestPipeline, + requiredPipeline); + throw new IllegalArgumentException(message); + } else { + indexRequest.setPipeline(requestPipeline); } } - } else if (IngestService.NOOP_PIPELINE_NAME.equals(pipeline) == false) { + + if (IngestService.NOOP_PIPELINE_NAME.equals(indexRequest.getPipeline()) == false) { + hasIndexRequestsWithPipelines = true; + } + /* + * We have to track whether or not the pipeline for this request has already been resolved. It can happen that the + * pipeline for this request has already been derived yet we execute this loop again. That occurs if the bulk request + * has been forwarded by a non-ingest coordinating node to an ingest node. In this case, the coordinating node will have + * already resolved the pipeline for this request. It is important that we are able to distinguish this situation as we + * can not double-resolve the pipeline because we will not be able to distinguish the case of the pipeline having been + * set from a request pipeline parameter versus having been set by the resolution. We need to be able to distinguish + * these cases as we need to reject the request if the pipeline was set by a required pipeline and there is a request + * pipeline parameter too. + */ + indexRequest.isPipelineResolved(true); + } else if (IngestService.NOOP_PIPELINE_NAME.equals(indexRequest.getPipeline()) == false) { hasIndexRequestsWithPipelines = true; } } + } if (hasIndexRequestsWithPipelines) { @@ -221,6 +279,14 @@ public class TransportBulkAction extends HandledTransportAction implement private String pipeline; + private boolean isPipelineResolved; + /** * Value for {@link #getAutoGeneratedTimestamp()} if the document has an external * provided ID. @@ -131,6 +133,9 @@ public class IndexRequest extends ReplicatedWriteRequest implement version = in.readLong(); versionType = VersionType.fromValue(in.readByte()); pipeline = in.readOptionalString(); + if (in.getVersion().onOrAfter(Version.V_7_5_0)) { + isPipelineResolved = in.readBoolean(); + } isRetry = in.readBoolean(); autoGeneratedTimestamp = in.readLong(); if (in.readBoolean()) { @@ -261,7 +266,7 @@ public class IndexRequest extends ReplicatedWriteRequest implement @Override public String type() { if (type == null) { - return MapperService.SINGLE_MAPPING_NAME; + return MapperService.SINGLE_MAPPING_NAME; } return type; } @@ -290,7 +295,7 @@ public class IndexRequest extends ReplicatedWriteRequest implement type = defaultType; } return this; - } + } /** * The id of the indexed document. If not set, will be automatically generated. */ @@ -345,6 +350,26 @@ public class IndexRequest extends ReplicatedWriteRequest implement return this.pipeline; } + /** + * Sets if the pipeline for this request has been resolved by the coordinating node. + * + * @param isPipelineResolved true if the pipeline has been resolved + * @return the request + */ + public IndexRequest isPipelineResolved(final boolean isPipelineResolved) { + this.isPipelineResolved = isPipelineResolved; + return this; + } + + /** + * Returns whether or not the pipeline for this request has been resolved by the coordinating node. 
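
As a reading aid, a condensed standalone restatement of the pipeline precedence the coordinating node applies in the hunk above; the class and method names are the editor's, and the literal "_none" stands in for IngestService.NOOP_PIPELINE_NAME.

final class PipelinePrecedenceSketch {
    static String resolve(String requestPipeline, String requiredPipeline, String defaultPipeline) {
        if (requiredPipeline != null && defaultPipeline != null) {
            // a required and a default pipeline can not both be picked up from templates/metadata
            throw new IllegalArgumentException("required pipeline [" + requiredPipeline
                + "] and default pipeline [" + defaultPipeline + "] can not both be set");
        }
        if (requiredPipeline != null) {
            if (requestPipeline != null) {
                // a request-supplied pipeline may not override a required pipeline
                throw new IllegalArgumentException("request pipeline [" + requestPipeline
                    + "] can not override required pipeline [" + requiredPipeline + "]");
            }
            return requiredPipeline;
        }
        if (requestPipeline != null) {
            return requestPipeline;
        }
        return defaultPipeline != null ? defaultPipeline : "_none";
    }
}
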
+ * + * @return true if the pipeline has been resolved + */ + public boolean isPipelineResolved() { + return this.isPipelineResolved; + } + /** * The source of the document to index, recopied to a new array if it is unsafe. */ @@ -633,8 +658,8 @@ public class IndexRequest extends ReplicatedWriteRequest implement @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - // A 7.x request allows null types but if deserialized in a 6.x node will cause nullpointer exceptions. - // So we use the type accessor method here to make the type non-null (will default it to "_doc"). + // A 7.x request allows null types but if deserialized in a 6.x node will cause nullpointer exceptions. + // So we use the type accessor method here to make the type non-null (will default it to "_doc"). out.writeOptionalString(type()); out.writeOptionalString(id); out.writeOptionalString(routing); @@ -653,6 +678,9 @@ public class IndexRequest extends ReplicatedWriteRequest implement out.writeLong(version); out.writeByte(versionType.getValue()); out.writeOptionalString(pipeline); + if (out.getVersion().onOrAfter(Version.V_7_5_0)) { + out.writeBoolean(isPipelineResolved); + } out.writeBoolean(isRetry); out.writeLong(autoGeneratedTimestamp); if (contentType != null) { diff --git a/server/src/main/java/org/elasticsearch/cluster/RepositoryCleanupInProgress.java b/server/src/main/java/org/elasticsearch/cluster/RepositoryCleanupInProgress.java index a8cb897f0d3..e7c8e995dd6 100644 --- a/server/src/main/java/org/elasticsearch/cluster/RepositoryCleanupInProgress.java +++ b/server/src/main/java/org/elasticsearch/cluster/RepositoryCleanupInProgress.java @@ -101,7 +101,7 @@ public final class RepositoryCleanupInProgress extends AbstractNamedDiffable i // the list of snapshot deletion request entries private final List entries; - private SnapshotDeletionsInProgress(List entries) { + public SnapshotDeletionsInProgress(List entries) { this.entries = Collections.unmodifiableList(entries); } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeRemovalClusterStateTaskExecutor.java b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeRemovalClusterStateTaskExecutor.java index bbfe64988c7..f0f7c6e5db7 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeRemovalClusterStateTaskExecutor.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeRemovalClusterStateTaskExecutor.java @@ -56,7 +56,7 @@ public class NodeRemovalClusterStateTaskExecutor implements ClusterStateTaskExec @Override public String toString() { - return node + " " + reason; + return node + " reason: " + reason; } } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java index 338bf91f46e..5e2f02afba6 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java @@ -283,7 +283,7 @@ public class IndexMetaData implements Diffable, ToXContentFragmen private final long mappingVersion; private final long settingsVersion; - + private final long aliasesVersion; private final long[] primaryTerms; @@ -1090,25 +1090,25 @@ public class IndexMetaData implements Diffable, ToXContentFragmen this.mappingVersion = mappingVersion; return this; } - + public long settingsVersion() { return settingsVersion; } - + public Builder settingsVersion(final long settingsVersion) { 
this.settingsVersion = settingsVersion; return this; } - + public long aliasesVersion() { return aliasesVersion; } - + public Builder aliasesVersion(final long aliasesVersion) { this.aliasesVersion = aliasesVersion; return this; } - + /** * returns the primary term for the given shard. * See {@link IndexMetaData#primaryTerm(int)} for more information. diff --git a/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java b/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java index 8fb325d5947..fd4e7023cfe 100644 --- a/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java +++ b/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java @@ -27,6 +27,7 @@ import org.elasticsearch.cluster.AbstractDiffable; import org.elasticsearch.cluster.Diff; import org.elasticsearch.common.Booleans; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -41,6 +42,7 @@ import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.stream.Collectors; import java.util.stream.Stream; import java.util.stream.StreamSupport; @@ -517,26 +519,18 @@ public class DiscoveryNodes extends AbstractDiffable implements if (summary.length() > 0) { summary.append(", "); } - summary.append("removed {"); - for (DiscoveryNode node : removedNodes()) { - summary.append(node).append(','); - } - summary.append("}"); + summary.append("removed {").append(Strings.collectionToCommaDelimitedString(removedNodes())).append('}'); } if (added()) { - // don't print if there is one added, and it is us - if (!(addedNodes().size() == 1 && addedNodes().get(0).getId().equals(localNodeId))) { + final String addedNodesExceptLocalNode = addedNodes().stream() + .filter(node -> node.getId().equals(localNodeId) == false).map(DiscoveryNode::toString) + .collect(Collectors.joining(",")); + if (addedNodesExceptLocalNode.length() > 0) { + // ignore ourselves when reporting on nodes being added if (summary.length() > 0) { summary.append(", "); } - summary.append("added {"); - for (DiscoveryNode node : addedNodes()) { - if (!node.getId().equals(localNodeId)) { - // don't print ourself - summary.append(node).append(','); - } - } - summary.append("}"); + summary.append("added {").append(addedNodesExceptLocalNode).append('}'); } } return summary.toString(); diff --git a/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java b/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java index d42dfec6e7e..c1b5f8dadc3 100644 --- a/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java +++ b/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java @@ -239,10 +239,10 @@ public class MasterService extends AbstractLifecycleComponent { // new cluster state, notify all listeners final DiscoveryNodes.Delta nodesDelta = clusterChangedEvent.nodesDelta(); if (nodesDelta.hasChanges() && logger.isInfoEnabled()) { - String nodeSummary = nodesDelta.shortSummary(); - if (nodeSummary.length() > 0) { - logger.info("{}, term: {}, version: {}, reason: {}", - summary, newClusterState.term(), newClusterState.version(), nodeSummary); + String nodesDeltaSummary = nodesDelta.shortSummary(); + if (nodesDeltaSummary.length() > 0) { + logger.info("{}, term: {}, version: {}, delta: {}", + summary, 
newClusterState.term(), newClusterState.version(), nodesDeltaSummary); } } diff --git a/server/src/main/java/org/elasticsearch/common/Rounding.java b/server/src/main/java/org/elasticsearch/common/Rounding.java index 3558b16aac1..07f47a997d7 100644 --- a/server/src/main/java/org/elasticsearch/common/Rounding.java +++ b/server/src/main/java/org/elasticsearch/common/Rounding.java @@ -219,17 +219,21 @@ public abstract class Rounding implements Writeable { static class TimeUnitRounding extends Rounding { static final byte ID = 1; + /** Since, there is no offset of -1 ms, it is safe to use -1 for non-fixed timezones */ + static final long TZ_OFFSET_NON_FIXED = -1; private final DateTimeUnit unit; private final ZoneId timeZone; private final boolean unitRoundsToMidnight; - private final boolean isUtcTimeZone; + /** For fixed offset timezones, this is the offset in milliseconds, otherwise TZ_OFFSET_NON_FIXED */ + private final long fixedOffsetMillis; TimeUnitRounding(DateTimeUnit unit, ZoneId timeZone) { this.unit = unit; this.timeZone = timeZone; this.unitRoundsToMidnight = this.unit.field.getBaseUnit().getDuration().toMillis() > 3600000L; - this.isUtcTimeZone = timeZone.normalized().equals(ZoneOffset.UTC); + this.fixedOffsetMillis = timeZone.getRules().isFixedOffset() == true ? + timeZone.getRules().getOffset(Instant.EPOCH).getTotalSeconds() * 1000 : TZ_OFFSET_NON_FIXED; } TimeUnitRounding(StreamInput in) throws IOException { @@ -277,11 +281,12 @@ public abstract class Rounding implements Writeable { @Override public long round(long utcMillis) { - // this works as long as the offset doesn't change. It is worth getting this case out of the way first, as - // the calculations for fixing things near to offset changes are a little expensive and are unnecessary in the common case - // of working in UTC. - if (isUtcTimeZone) { - return unit.roundFloor(utcMillis); + // This works as long as the tz offset doesn't change. It is worth getting this case out of the way first, + // as the calculations for fixing things near to offset changes are a little expensive and unnecessary + // in the common case of working with fixed offset timezones (such as UTC). + if (fixedOffsetMillis != TZ_OFFSET_NON_FIXED) { + long localMillis = utcMillis + fixedOffsetMillis; + return unit.roundFloor(localMillis) - fixedOffsetMillis; } Instant instant = Instant.ofEpochMilli(utcMillis); @@ -437,20 +442,25 @@ public abstract class Rounding implements Writeable { } static final byte ID = 2; + /** Since, there is no offset of -1 ms, it is safe to use -1 for non-fixed timezones */ + private static final long TZ_OFFSET_NON_FIXED = -1; private final long interval; private final ZoneId timeZone; + /** For fixed offset timezones, this is the offset in milliseconds, otherwise TZ_OFFSET_NON_FIXED */ + private final long fixedOffsetMillis; TimeIntervalRounding(long interval, ZoneId timeZone) { if (interval < 1) throw new IllegalArgumentException("Zero or negative time interval not supported"); this.interval = interval; this.timeZone = timeZone; + this.fixedOffsetMillis = timeZone.getRules().isFixedOffset() == true ? 
+ timeZone.getRules().getOffset(Instant.EPOCH).getTotalSeconds() * 1000 : TZ_OFFSET_NON_FIXED; } TimeIntervalRounding(StreamInput in) throws IOException { - interval = in.readVLong(); - timeZone = DateUtils.of(in.readString()); + this(in.readVLong(), DateUtils.of(in.readString())); } @Override @@ -460,6 +470,14 @@ public abstract class Rounding implements Writeable { @Override public long round(final long utcMillis) { + // This works as long as the tz offset doesn't change. It is worth getting this case out of the way first, + // as the calculations for fixing things near to offset changes are a little expensive and unnecessary + // in the common case of working with fixed offset timezones (such as UTC). + if (fixedOffsetMillis != TZ_OFFSET_NON_FIXED) { + long localMillis = utcMillis + fixedOffsetMillis; + return (roundKey(localMillis, interval) * interval) - fixedOffsetMillis; + } + final Instant utcInstant = Instant.ofEpochMilli(utcMillis); final LocalDateTime rawLocalDateTime = LocalDateTime.ofInstant(utcInstant, timeZone); diff --git a/server/src/main/java/org/elasticsearch/common/lucene/search/function/ScriptScoreQuery.java b/server/src/main/java/org/elasticsearch/common/lucene/search/function/ScriptScoreQuery.java index 481a7f666e9..ef07530ab61 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/search/function/ScriptScoreQuery.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/search/function/ScriptScoreQuery.java @@ -22,16 +22,17 @@ package org.elasticsearch.common.lucene.search.function; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.Term; +import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.Explanation; import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryVisitor; import org.apache.lucene.search.Weight; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; import org.elasticsearch.ElasticsearchException; - import java.io.IOException; import java.util.Objects; import java.util.Set; @@ -137,6 +138,11 @@ public class ScriptScoreQuery extends Query { }; } + @Override + public void visit(QueryVisitor visitor) { + // Highlighters must visit the child query to extract terms + subQuery.visit(visitor.getSubVisitor(BooleanClause.Occur.MUST, this)); + } @Override public String toString(String field) { diff --git a/server/src/main/java/org/elasticsearch/common/path/PathTrie.java b/server/src/main/java/org/elasticsearch/common/path/PathTrie.java index 7b6c660a64a..a6ec8c92dae 100644 --- a/server/src/main/java/org/elasticsearch/common/path/PathTrie.java +++ b/server/src/main/java/org/elasticsearch/common/path/PathTrie.java @@ -96,7 +96,12 @@ public class PathTrie { private void updateKeyWithNamedWildcard(String key) { this.key = key; - namedWildcard = key.substring(key.indexOf('{') + 1, key.indexOf('}')); + String newNamedWildcard = key.substring(key.indexOf('{') + 1, key.indexOf('}')); + if (namedWildcard != null && newNamedWildcard.equals(namedWildcard) == false) { + throw new IllegalArgumentException("Trying to use conflicting wildcard names for same path: " + + namedWildcard + " and " + newNamedWildcard); + } + namedWildcard = newNamedWildcard; } private void addInnerChild(String key, TrieNode child) { diff --git a/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java 
b/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java index 4dc99c5f9ea..6a95eac713a 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java @@ -166,6 +166,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings { EngineConfig.INDEX_OPTIMIZE_AUTO_GENERATED_IDS, IndexMetaData.SETTING_WAIT_FOR_ACTIVE_SHARDS, IndexSettings.DEFAULT_PIPELINE, + IndexSettings.REQUIRED_PIPELINE, MetaDataIndexStateService.VERIFIED_BEFORE_CLOSE_SETTING, // validate that built-in similarities don't get redefined diff --git a/server/src/main/java/org/elasticsearch/common/settings/KeyStoreWrapper.java b/server/src/main/java/org/elasticsearch/common/settings/KeyStoreWrapper.java index 44bcd249818..db378922655 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/KeyStoreWrapper.java +++ b/server/src/main/java/org/elasticsearch/common/settings/KeyStoreWrapper.java @@ -20,6 +20,8 @@ package org.elasticsearch.common.settings; import org.apache.lucene.codecs.CodecUtil; +import org.apache.lucene.index.IndexFormatTooNewException; +import org.apache.lucene.index.IndexFormatTooOldException; import org.apache.lucene.store.BufferedChecksumIndexInput; import org.apache.lucene.store.ChecksumIndexInput; import org.apache.lucene.store.IOContext; @@ -40,7 +42,6 @@ import javax.crypto.SecretKeyFactory; import javax.crypto.spec.GCMParameterSpec; import javax.crypto.spec.PBEKeySpec; import javax.crypto.spec.SecretKeySpec; - import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.DataInputStream; @@ -217,7 +218,16 @@ public class KeyStoreWrapper implements SecureSettings { SimpleFSDirectory directory = new SimpleFSDirectory(configDir); try (IndexInput indexInput = directory.openInput(KEYSTORE_FILENAME, IOContext.READONCE)) { ChecksumIndexInput input = new BufferedChecksumIndexInput(indexInput); - int formatVersion = CodecUtil.checkHeader(input, KEYSTORE_FILENAME, MIN_FORMAT_VERSION, FORMAT_VERSION); + final int formatVersion; + try { + formatVersion = CodecUtil.checkHeader(input, KEYSTORE_FILENAME, MIN_FORMAT_VERSION, FORMAT_VERSION); + } catch (IndexFormatTooOldException e) { + throw new IllegalStateException("The Elasticsearch keystore [" + keystoreFile + "] format is too old. " + + "You should delete and recreate it in order to upgrade.", e); + } catch (IndexFormatTooNewException e) { + throw new IllegalStateException("The Elasticsearch keystore [" + keystoreFile + "] format is too new. " + + "Are you trying to downgrade? 
You should delete and recreate it in order to downgrade.", e); + } byte hasPasswordByte = input.readByte(); boolean hasPassword = hasPasswordByte == 1; if (hasPassword == false && hasPasswordByte != 0) { diff --git a/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java b/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java index 178d3c8e354..739fe1913bb 100644 --- a/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java +++ b/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java @@ -28,7 +28,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.RerouteService; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.ClusterApplier; -import org.elasticsearch.cluster.service.ClusterApplierService; import org.elasticsearch.cluster.service.MasterService; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -152,12 +151,12 @@ public class DiscoveryModule { if (ZEN2_DISCOVERY_TYPE.equals(discoveryType) || SINGLE_NODE_DISCOVERY_TYPE.equals(discoveryType)) { discovery = new Coordinator(NODE_NAME_SETTING.get(settings), settings, clusterSettings, - transportService, namedWriteableRegistry, allocationService, masterService, - () -> gatewayMetaState.getPersistedState(settings, (ClusterApplierService) clusterApplier), seedHostsProvider, - clusterApplier, joinValidators, new Random(Randomness.get().nextLong()), rerouteService, electionStrategy); + transportService, namedWriteableRegistry, allocationService, masterService, gatewayMetaState::getPersistedState, + seedHostsProvider, clusterApplier, joinValidators, new Random(Randomness.get().nextLong()), rerouteService, + electionStrategy); } else if (ZEN_DISCOVERY_TYPE.equals(discoveryType)) { discovery = new ZenDiscovery(settings, threadPool, transportService, namedWriteableRegistry, masterService, clusterApplier, - clusterSettings, seedHostsProvider, allocationService, joinValidators, gatewayMetaState, rerouteService); + clusterSettings, seedHostsProvider, allocationService, joinValidators, rerouteService); } else { throw new IllegalArgumentException("Unknown discovery type [" + discoveryType + "]"); } diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java b/server/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java index 3beaea7c3dc..42a5fab7703 100644 --- a/server/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java +++ b/server/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java @@ -31,6 +31,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateTaskConfig; import org.elasticsearch.cluster.NotMasterException; import org.elasticsearch.cluster.block.ClusterBlocks; +import org.elasticsearch.cluster.coordination.FailedToCommitClusterStateException; import org.elasticsearch.cluster.coordination.JoinTaskExecutor; import org.elasticsearch.cluster.coordination.NoMasterBlockService; import org.elasticsearch.cluster.coordination.NodeRemovalClusterStateTaskExecutor; @@ -40,7 +41,6 @@ import org.elasticsearch.cluster.routing.RerouteService; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.ClusterApplier; import org.elasticsearch.cluster.service.ClusterApplier.ClusterApplyListener; -import org.elasticsearch.cluster.service.ClusterApplierService; import 
org.elasticsearch.cluster.service.MasterService; import org.elasticsearch.common.Priority; import org.elasticsearch.common.component.AbstractLifecycleComponent; @@ -59,10 +59,8 @@ import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.discovery.Discovery; import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.discovery.DiscoveryStats; -import org.elasticsearch.cluster.coordination.FailedToCommitClusterStateException; import org.elasticsearch.discovery.SeedHostsProvider; import org.elasticsearch.discovery.zen.PublishClusterStateAction.IncomingClusterStateListener; -import org.elasticsearch.gateway.GatewayMetaState; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.EmptyTransportResponseHandler; @@ -165,8 +163,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover public ZenDiscovery(Settings settings, ThreadPool threadPool, TransportService transportService, NamedWriteableRegistry namedWriteableRegistry, MasterService masterService, ClusterApplier clusterApplier, ClusterSettings clusterSettings, SeedHostsProvider hostsProvider, AllocationService allocationService, - Collection> onJoinValidators, GatewayMetaState gatewayMetaState, - RerouteService rerouteService) { + Collection> onJoinValidators, RerouteService rerouteService) { this.onJoinValidators = JoinTaskExecutor.addBuiltInJoinValidators(onJoinValidators); this.masterService = masterService; this.clusterApplier = clusterApplier; @@ -234,10 +231,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover transportService.registerRequestHandler( DISCOVERY_REJOIN_ACTION_NAME, ThreadPool.Names.SAME, RejoinClusterRequest::new, new RejoinClusterRequestHandler()); - - if (clusterApplier instanceof ClusterApplierService) { - ((ClusterApplierService) clusterApplier).addLowPriorityApplier(gatewayMetaState); - } } // protected to allow overriding in tests diff --git a/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java b/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java index 91bcb68370e..c6e9182fd8f 100644 --- a/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java +++ b/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java @@ -23,12 +23,12 @@ import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.lucene.util.SetOnce; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ClusterStateApplier; -import org.elasticsearch.cluster.coordination.CoordinationState; import org.elasticsearch.cluster.coordination.CoordinationState.PersistedState; import org.elasticsearch.cluster.coordination.InMemoryPersistedState; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -38,12 +38,12 @@ import org.elasticsearch.cluster.metadata.MetaDataIndexUpgradeService; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.cluster.service.ClusterApplierService; import org.elasticsearch.cluster.service.ClusterService; import 
org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.index.Index; import org.elasticsearch.plugins.MetaDataUpgrader; import org.elasticsearch.transport.TransportService; @@ -71,41 +71,71 @@ import java.util.function.UnaryOperator; * elected as master, it requests metaData from other master eligible nodes. After that, master node performs re-conciliation on the * gathered results, re-creates {@link ClusterState} and broadcasts this state to other nodes in the cluster. */ -public class GatewayMetaState implements ClusterStateApplier, CoordinationState.PersistedState { +public class GatewayMetaState implements PersistedState { protected static final Logger logger = LogManager.getLogger(GatewayMetaState.class); private final MetaStateService metaStateService; private final Settings settings; - private final ClusterService clusterService; - private final TransportService transportService; - //there is a single thread executing updateClusterState calls, hence no volatile modifier + // On master-eligible Zen2 nodes, we use this very object for the PersistedState (so that the state is actually persisted); on other + // nodes we use an InMemoryPersistedState instead and persist using a cluster applier if needed. In all cases it's an error to try and + // use this object as a PersistedState before calling start(). TODO stop implementing PersistedState at the top level. + private final SetOnce persistedState = new SetOnce<>(); + + // on master-eligible nodes we call updateClusterState under the Coordinator's mutex; on master-ineligible data nodes we call + // updateClusterState on the (unique) cluster applier thread; on other nodes we never call updateClusterState. In all cases there's no + // need to synchronize access to these variables. 
protected Manifest previousManifest; protected ClusterState previousClusterState; protected boolean incrementalWrite; - public GatewayMetaState(Settings settings, MetaStateService metaStateService, - MetaDataIndexUpgradeService metaDataIndexUpgradeService, MetaDataUpgrader metaDataUpgrader, - TransportService transportService, ClusterService clusterService) throws IOException { + public GatewayMetaState(Settings settings, MetaStateService metaStateService) { this.settings = settings; this.metaStateService = metaStateService; - this.transportService = transportService; - this.clusterService = clusterService; - - upgradeMetaData(metaDataIndexUpgradeService, metaDataUpgrader); - initializeClusterState(ClusterName.CLUSTER_NAME_SETTING.get(settings)); - incrementalWrite = false; } - public PersistedState getPersistedState(Settings settings, ClusterApplierService clusterApplierService) { - applyClusterStateUpdaters(); - if (DiscoveryNode.isMasterNode(settings) == false) { - // use Zen1 way of writing cluster state for non-master-eligible nodes - // this avoids concurrent manipulating of IndexMetadata with IndicesStore - clusterApplierService.addLowPriorityApplier(this); - return new InMemoryPersistedState(getCurrentTerm(), getLastAcceptedState()); + public void start(TransportService transportService, ClusterService clusterService, + MetaDataIndexUpgradeService metaDataIndexUpgradeService, MetaDataUpgrader metaDataUpgrader) { + assert previousClusterState == null : "should only start once, but already have " + previousClusterState; + try { + upgradeMetaData(metaDataIndexUpgradeService, metaDataUpgrader); + initializeClusterState(ClusterName.CLUSTER_NAME_SETTING.get(settings)); + } catch (IOException e) { + throw new ElasticsearchException("failed to load metadata", e); + } + incrementalWrite = false; + + applyClusterStateUpdaters(transportService, clusterService); + if (DiscoveryModule.DISCOVERY_TYPE_SETTING.get(settings).equals(DiscoveryModule.ZEN_DISCOVERY_TYPE)) { + // only for tests that simulate a mixed Zen1/Zen2 clusters, see Zen1IT + if (isMasterOrDataNode()) { + clusterService.addLowPriorityApplier(this::applyClusterState); + } + persistedState.set(new InMemoryPersistedState(getCurrentTerm(), getLastAcceptedState())); + } else { + if (DiscoveryNode.isMasterNode(settings) == false) { + if (DiscoveryNode.isDataNode(settings)) { + // Master-eligible nodes persist index metadata for all indices regardless of whether they hold any shards or not. It's + // vitally important to the safety of the cluster coordination system that master-eligible nodes persist this metadata + // when _accepting_ the cluster state (i.e. before it is committed). This persistence happens on the generic threadpool. + // + // In contrast, master-ineligible data nodes only persist the index metadata for shards that they hold. When all shards + // of an index are moved off such a node the IndicesStore is responsible for removing the corresponding index directory, + // including the metadata, and does so on the cluster applier thread. + // + // This presents a problem: if a shard is unassigned from a node and then reassigned back to it again then there is a + // race between the IndicesStore deleting the index folder and the CoordinationState concurrently trying to write the + // updated metadata into it. We could probably solve this with careful synchronization, but in fact there is no need. 
+ // The persisted state on master-ineligible data nodes is mostly ignored - it's only there to support dangling index + // imports, which is inherently unsafe anyway. Thus we can safely delay metadata writes on master-ineligible data nodes + // until applying the cluster state, which is what this does: + clusterService.addLowPriorityApplier(this::applyClusterState); + } + persistedState.set(new InMemoryPersistedState(getCurrentTerm(), getLastAcceptedState())); + } else { + persistedState.set(this); + } } - return this; } private void initializeClusterState(ClusterName clusterName) throws IOException { @@ -122,7 +152,7 @@ public class GatewayMetaState implements ClusterStateApplier, CoordinationState. logger.debug("took {} to load state", TimeValue.timeValueMillis(TimeValue.nsecToMSec(System.nanoTime() - startNS))); } - public void applyClusterStateUpdaters() { + protected void applyClusterStateUpdaters(TransportService transportService, ClusterService clusterService) { assert previousClusterState.nodes().getLocalNode() == null : "applyClusterStateUpdaters must only be called once"; assert transportService.getLocalNode() != null : "transport service is not yet started"; @@ -181,15 +211,18 @@ public class GatewayMetaState implements ClusterStateApplier, CoordinationState. return DiscoveryNode.isMasterNode(settings) || DiscoveryNode.isDataNode(settings); } + public PersistedState getPersistedState() { + final PersistedState persistedState = this.persistedState.get(); + assert persistedState != null : "not started"; + return persistedState; + } + public MetaData getMetaData() { return previousClusterState.metaData(); } - @Override - public void applyClusterState(ClusterChangedEvent event) { - if (isMasterOrDataNode() == false) { - return; - } + private void applyClusterState(ClusterChangedEvent event) { + assert isMasterOrDataNode(); if (event.state().blocks().disableStatePersistence()) { incrementalWrite = false; diff --git a/server/src/main/java/org/elasticsearch/index/IndexService.java b/server/src/main/java/org/elasticsearch/index/IndexService.java index e14a718f083..da470a04afa 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexService.java +++ b/server/src/main/java/org/elasticsearch/index/IndexService.java @@ -22,7 +22,6 @@ package org.elasticsearch.index; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.IndexReaderContext; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Sort; import org.apache.lucene.store.AlreadyClosedException; @@ -523,25 +522,17 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust return indexSettings; } - private IndexSearcher newCachedSearcher(int shardId, IndexReaderContext context) { - IndexSearcher searcher = new IndexSearcher(context); - searcher.setQueryCache(cache().query()); - searcher.setQueryCachingPolicy(getShard(shardId).getQueryCachingPolicy()); - return searcher; - } - /** * Creates a new QueryShardContext. The context has not types set yet, if types are required set them via * {@link QueryShardContext#setTypes(String...)}. * - * Passing a {@code null} {@link IndexReader} will return a valid context, however it won't be able to make + * Passing a {@code null} {@link IndexSearcher} will return a valid context, however it won't be able to make * {@link IndexReader}-specific optimizations, such as rewriting containing range queries. 
*/ - public QueryShardContext newQueryShardContext(int shardId, IndexReader indexReader, LongSupplier nowInMillis, String clusterAlias) { + public QueryShardContext newQueryShardContext(int shardId, IndexSearcher searcher, LongSupplier nowInMillis, String clusterAlias) { return new QueryShardContext( - shardId, indexSettings, indexCache.bitsetFilterCache(), context -> newCachedSearcher(shardId, context), - indexFieldData::getForField, mapperService(), similarityService(), scriptService, xContentRegistry, namedWriteableRegistry, - client, indexReader, nowInMillis, clusterAlias); + shardId, indexSettings, bigArrays, indexCache.bitsetFilterCache(), indexFieldData::getForField, mapperService(), + similarityService(), scriptService, xContentRegistry, namedWriteableRegistry, client, searcher, nowInMillis, clusterAlias); } /** diff --git a/server/src/main/java/org/elasticsearch/index/IndexSettings.java b/server/src/main/java/org/elasticsearch/index/IndexSettings.java index 19e99a65eb5..ab7a4fc9e64 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexSettings.java +++ b/server/src/main/java/org/elasticsearch/index/IndexSettings.java @@ -35,8 +35,10 @@ import org.elasticsearch.ingest.IngestService; import org.elasticsearch.node.Node; import java.util.Collections; +import java.util.Iterator; import java.util.List; import java.util.Locale; +import java.util.Map; import java.util.concurrent.TimeUnit; import java.util.function.Consumer; import java.util.function.Function; @@ -299,12 +301,67 @@ public final class IndexSettings { 1000, 1, Property.Dynamic, Property.IndexScope); public static final Setting DEFAULT_PIPELINE = - new Setting<>("index.default_pipeline", IngestService.NOOP_PIPELINE_NAME, s -> { - if (s == null || s.isEmpty()) { - throw new IllegalArgumentException("Value for [index.default_pipeline] must be a non-empty string."); - } - return s; - }, Property.Dynamic, Property.IndexScope); + new Setting<>("index.default_pipeline", + IngestService.NOOP_PIPELINE_NAME, + Function.identity(), + new DefaultPipelineValidator(), + Property.Dynamic, + Property.IndexScope); + + public static final Setting REQUIRED_PIPELINE = + new Setting<>("index.required_pipeline", + IngestService.NOOP_PIPELINE_NAME, + Function.identity(), + new RequiredPipelineValidator(), + Property.Dynamic, + Property.IndexScope); + + static class DefaultPipelineValidator implements Setting.Validator { + + @Override + public void validate(final String value) { + + } + + @Override + public void validate(final String value, final Map, String> settings) { + final String requiredPipeline = settings.get(IndexSettings.REQUIRED_PIPELINE); + if (value.equals(IngestService.NOOP_PIPELINE_NAME) == false + && requiredPipeline.equals(IngestService.NOOP_PIPELINE_NAME) == false) { + throw new IllegalArgumentException( + "index has a default pipeline [" + value + "] and a required pipeline [" + requiredPipeline + "]"); + } + } + + @Override + public Iterator> settings() { + return Collections.singletonList(REQUIRED_PIPELINE).iterator(); + } + + } + + static class RequiredPipelineValidator implements Setting.Validator { + + @Override + public void validate(final String value) { + + } + + @Override + public void validate(final String value, final Map, String> settings) { + final String defaultPipeline = settings.get(IndexSettings.DEFAULT_PIPELINE); + if (value.equals(IngestService.NOOP_PIPELINE_NAME) && defaultPipeline.equals(IngestService.NOOP_PIPELINE_NAME) == false) { + throw new IllegalArgumentException( + "index has a required 
pipeline [" + value + "] and a default pipeline [" + defaultPipeline + "]"); + } + } + + @Override + public Iterator> settings() { + return Collections.singletonList(DEFAULT_PIPELINE).iterator(); + } + + } /** * Marks an index to be searched throttled. This means that never more than one shard of such an index will be searched concurrently @@ -384,6 +441,7 @@ public final class IndexSettings { private volatile int maxAnalyzedOffset; private volatile int maxTermsCount; private volatile String defaultPipeline; + private volatile String requiredPipeline; private volatile boolean searchThrottled; /** @@ -555,6 +613,7 @@ public final class IndexSettings { scopedSettings.addSettingsUpdateConsumer(INDEX_SEARCH_IDLE_AFTER, this::setSearchIdleAfter); scopedSettings.addSettingsUpdateConsumer(MAX_REGEX_LENGTH_SETTING, this::setMaxRegexLength); scopedSettings.addSettingsUpdateConsumer(DEFAULT_PIPELINE, this::setDefaultPipeline); + scopedSettings.addSettingsUpdateConsumer(REQUIRED_PIPELINE, this::setRequiredPipeline); scopedSettings.addSettingsUpdateConsumer(INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING, this::setSoftDeleteRetentionOperations); scopedSettings.addSettingsUpdateConsumer(INDEX_SEARCH_THROTTLED, this::setSearchThrottled); scopedSettings.addSettingsUpdateConsumer(INDEX_SOFT_DELETES_RETENTION_LEASE_PERIOD_SETTING, this::setRetentionLeaseMillis); @@ -746,7 +805,7 @@ public final class IndexSettings { public void setTranslogSyncInterval(TimeValue translogSyncInterval) { this.syncInterval = translogSyncInterval; } - + /** * Returns this interval in which the shards of this index are asynchronously refreshed. {@code -1} means async refresh is disabled. */ @@ -825,7 +884,7 @@ public final class IndexSettings { * Returns the max number of filters in adjacency_matrix aggregation search requests * @deprecated This setting will be removed in 8.0 */ - @Deprecated + @Deprecated public int getMaxAdjacencyMatrixFilters() { return this.maxAdjacencyMatrixFilters; } @@ -834,7 +893,7 @@ public final class IndexSettings { * @param maxAdjacencyFilters the max number of filters in adjacency_matrix aggregation search requests * @deprecated This setting will be removed in 8.0 */ - @Deprecated + @Deprecated private void setMaxAdjacencyMatrixFilters(int maxAdjacencyFilters) { this.maxAdjacencyMatrixFilters = maxAdjacencyFilters; } @@ -992,6 +1051,14 @@ public final class IndexSettings { this.defaultPipeline = defaultPipeline; } + public String getRequiredPipeline() { + return requiredPipeline; + } + + public void setRequiredPipeline(final String requiredPipeline) { + this.requiredPipeline = requiredPipeline; + } + /** * Returns true if soft-delete is enabled. 
*/ diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/IndexFieldData.java b/server/src/main/java/org/elasticsearch/index/fielddata/IndexFieldData.java index 11388190c30..72687353214 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/IndexFieldData.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/IndexFieldData.java @@ -20,9 +20,7 @@ package org.elasticsearch.index.fielddata; import org.apache.lucene.index.DirectoryReader; -import org.apache.lucene.index.IndexReaderContext; import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.index.ReaderUtil; import org.apache.lucene.search.DocIdSet; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.FieldComparatorSource; @@ -47,7 +45,6 @@ import org.elasticsearch.search.MultiValueMode; import org.elasticsearch.search.sort.NestedSortBuilder; import java.io.IOException; -import java.util.function.Function; /** * Thread-safe utility class that allows to get per-segment values via the @@ -115,24 +112,19 @@ public interface IndexFieldData extends IndexCompone private final BitSetProducer rootFilter; private final Query innerQuery; private final NestedSortBuilder nestedSort; - private final Function searcherFactory; + private final IndexSearcher searcher; - public Nested(BitSetProducer rootFilter, Query innerQuery, NestedSortBuilder nestedSort, - Function searcherFactory) { + public Nested(BitSetProducer rootFilter, Query innerQuery, NestedSortBuilder nestedSort, IndexSearcher searcher) { this.rootFilter = rootFilter; this.innerQuery = innerQuery; this.nestedSort = nestedSort; - this.searcherFactory = searcherFactory; + this.searcher = searcher; } public Query getInnerQuery() { return innerQuery; } - public BitSetProducer getRootFilter() { - return rootFilter; - } - public NestedSortBuilder getNestedSort() { return nestedSort; } /** @@ -146,9 +138,7 @@ public interface IndexFieldData extends IndexCompone * Get a {@link DocIdSet} that matches the inner documents. */ public DocIdSetIterator innerDocs(LeafReaderContext ctx) throws IOException { - final IndexReaderContext topLevelCtx = ReaderUtil.getTopLevelContext(ctx); - IndexSearcher indexSearcher = searcherFactory.apply(topLevelCtx); - Weight weight = indexSearcher.createWeight(indexSearcher.rewrite(innerQuery), ScoreMode.COMPLETE_NO_SCORES, 1f); + Weight weight = searcher.createWeight(searcher.rewrite(innerQuery), ScoreMode.COMPLETE_NO_SCORES, 1f); Scorer s = weight.scorer(ctx); return s == null ? null : s.iterator(); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/FieldNamesFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/FieldNamesFieldMapper.java index 32c44fd5f55..9e886791ffa 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/FieldNamesFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/FieldNamesFieldMapper.java @@ -96,6 +96,11 @@ public class FieldNamesFieldMapper extends MetadataFieldMapper { } public static class TypeParser implements MetadataFieldMapper.TypeParser { + + public static final String ENABLED_DEPRECATION_MESSAGE = "Index [{}] uses the deprecated `enabled` setting for `_field_names`. " + + "Disabling _field_names is not necessary because it no longer carries a large index overhead. Support for this setting " + + "will be removed in a future major version. 
Please remove it from your mappings and templates."; + @Override public MetadataFieldMapper.Builder parse(String name, Map node, ParserContext parserContext) throws MapperParsingException { @@ -106,6 +111,8 @@ public class FieldNamesFieldMapper extends MetadataFieldMapper { String fieldName = entry.getKey(); Object fieldNode = entry.getValue(); if (fieldName.equals("enabled")) { + String indexName = parserContext.mapperService().index().getName(); + deprecationLogger.deprecatedAndMaybeLog("field_names_enabled_parameter", ENABLED_DEPRECATION_MESSAGE, indexName); builder.enabled(XContentMapValues.nodeBooleanValue(fieldNode, name + ".enabled")); iterator.remove(); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeIndexer.java b/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeIndexer.java index f0068f29e33..78c1b995683 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeIndexer.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeIndexer.java @@ -229,7 +229,7 @@ public final class GeoShapeIndexer implements AbstractGeometryFieldMapper.Indexe double[] lons = new double[partMinus.length()]; for (int i = 0; i < partMinus.length(); i++) { lats[i] = normalizeLat(partMinus.getY(i)); - lons[i] = normalizeLon(partMinus.getX(i)); + lons[i] = normalizeLonMinus180Inclusive(partMinus.getX(i)); } lines.add(new Line(lons, lats)); } @@ -274,7 +274,7 @@ public final class GeoShapeIndexer implements AbstractGeometryFieldMapper.Indexe lons[offset + i - 1] = intersection.getX(); lats[offset + i - 1] = intersection.getY(); - shift(shift, lons); + shift(shift, partLons); offset = i - 1; shift = lons[i] > DATELINE ? DATELINE : (lons[i] < -DATELINE ? -DATELINE : 0); } else { @@ -926,7 +926,7 @@ public final class GeoShapeIndexer implements AbstractGeometryFieldMapper.Indexe for (int i = 0; i < shell.length; ++i) { //Lucene Tessellator treats different +180 and -180 and we should keep the sign. //normalizeLon method excludes -180. - x[i] = Math.abs(shell[i].getX()) > 180 ? normalizeLon(shell[i].getX()) : shell[i].getX(); + x[i] = normalizeLonMinus180Inclusive(shell[i].getX()); y[i] = normalizeLat(shell[i].getY()); } @@ -1043,4 +1043,11 @@ public final class GeoShapeIndexer implements AbstractGeometryFieldMapper.Indexe } return new org.apache.lucene.geo.Polygon(polygon.getPolygon().getY(), polygon.getPolygon().getX(), holes); } + + /** + * Normalizes longitude while accepting -180 degrees as a valid value + */ + private static double normalizeLonMinus180Inclusive(double lon) { + return Math.abs(lon) > 180 ? 
normalizeLon(lon) : lon; + } } diff --git a/server/src/main/java/org/elasticsearch/index/query/InnerHitContextBuilder.java b/server/src/main/java/org/elasticsearch/index/query/InnerHitContextBuilder.java index 9e6a766aed8..92ba3c5d2b8 100644 --- a/server/src/main/java/org/elasticsearch/index/query/InnerHitContextBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/InnerHitContextBuilder.java @@ -29,7 +29,6 @@ import org.elasticsearch.search.sort.SortAndFormats; import org.elasticsearch.search.sort.SortBuilder; import java.io.IOException; -import java.util.HashMap; import java.util.Map; import java.util.Optional; @@ -47,9 +46,9 @@ public abstract class InnerHitContextBuilder { this.query = query; } - public final void build(SearchContext parentSearchContext, InnerHitsContext innerHitsContext) throws IOException { + public final void validate(QueryShardContext queryShardContext) { long innerResultWindow = innerHitBuilder.getFrom() + innerHitBuilder.getSize(); - int maxInnerResultWindow = parentSearchContext.mapperService().getIndexSettings().getMaxInnerResultWindow(); + int maxInnerResultWindow = queryShardContext.getIndexSettings().getMaxInnerResultWindow(); if (innerResultWindow > maxInnerResultWindow) { throw new IllegalArgumentException( "Inner result window is too large, the inner hit definition's [" + innerHitBuilder.getName() + @@ -58,10 +57,12 @@ public abstract class InnerHitContextBuilder { "] index level setting." ); } - doBuild(parentSearchContext, innerHitsContext); + doValidate(queryShardContext); } - protected abstract void doBuild(SearchContext parentSearchContext, InnerHitsContext innerHitsContext) throws IOException; + protected abstract void doValidate(QueryShardContext queryShardContext); + + public abstract void build(SearchContext parentSearchContext, InnerHitsContext innerHitsContext) throws IOException; public static void extractInnerHits(QueryBuilder query, Map innerHitBuilders) { if (query instanceof AbstractQueryBuilder) { @@ -109,23 +110,6 @@ public abstract class InnerHitContextBuilder { } ParsedQuery parsedQuery = new ParsedQuery(query.toQuery(queryShardContext), queryShardContext.copyNamedQueries()); innerHitsContext.parsedQuery(parsedQuery); - Map baseChildren = - buildChildInnerHits(innerHitsContext.parentSearchContext(), children); - innerHitsContext.setChildInnerHits(baseChildren); - } - - private static Map buildChildInnerHits(SearchContext parentSearchContext, - Map children) throws IOException { - - Map childrenInnerHits = new HashMap<>(); - for (Map.Entry entry : children.entrySet()) { - InnerHitsContext childInnerHitsContext = new InnerHitsContext(); - entry.getValue().build( - parentSearchContext, childInnerHitsContext); - if (childInnerHitsContext.getInnerHits() != null) { - childrenInnerHits.putAll(childInnerHitsContext.getInnerHits()); - } - } - return childrenInnerHits; + innerHitsContext.innerHits(children); } } diff --git a/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java index ee8062308ac..b2cc46d5640 100644 --- a/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java @@ -332,28 +332,34 @@ public class NestedQueryBuilder extends AbstractQueryBuilder static class NestedInnerHitContextBuilder extends InnerHitContextBuilder { private final String path; - NestedInnerHitContextBuilder(String path, QueryBuilder query, InnerHitBuilder 
innerHitBuilder, - Map children) { + NestedInnerHitContextBuilder(String path, + QueryBuilder query, + InnerHitBuilder innerHitBuilder, + Map children) { super(query, innerHitBuilder, children); this.path = path; } @Override - protected void doBuild(SearchContext parentSearchContext, - InnerHitsContext innerHitsContext) throws IOException { - QueryShardContext queryShardContext = parentSearchContext.getQueryShardContext(); + public void doValidate(QueryShardContext queryShardContext) { + if (queryShardContext.getObjectMapper(path) == null + && innerHitBuilder.isIgnoreUnmapped() == false) { + throw new IllegalStateException("[" + query.getName() + "] no mapping found for type [" + path + "]"); + } + } + + @Override + public void build(SearchContext searchContext, InnerHitsContext innerHitsContext) throws IOException { + QueryShardContext queryShardContext = searchContext.getQueryShardContext(); ObjectMapper nestedObjectMapper = queryShardContext.getObjectMapper(path); if (nestedObjectMapper == null) { - if (innerHitBuilder.isIgnoreUnmapped() == false) { - throw new IllegalStateException("[" + query.getName() + "] no mapping found for type [" + path + "]"); - } else { - return; - } + assert innerHitBuilder.isIgnoreUnmapped() : "should be validated first"; + return; } String name = innerHitBuilder.getName() != null ? innerHitBuilder.getName() : nestedObjectMapper.fullPath(); ObjectMapper parentObjectMapper = queryShardContext.nestedScope().nextLevel(nestedObjectMapper); NestedInnerHitSubContext nestedInnerHits = new NestedInnerHitSubContext( - name, parentSearchContext, parentObjectMapper, nestedObjectMapper + name, searchContext, parentObjectMapper, nestedObjectMapper ); setupInnerHitsContext(queryShardContext, nestedInnerHits); queryShardContext.nestedScope().previousLevel(); @@ -388,7 +394,7 @@ public class NestedQueryBuilder extends AbstractQueryBuilder SearchHit hit = hits[i]; Query rawParentFilter; if (parentObjectMapper == null) { - rawParentFilter = Queries.newNonNestedFilter(context.indexShard().indexSettings().getIndexVersionCreated()); + rawParentFilter = Queries.newNonNestedFilter(indexShard().indexSettings().getIndexVersionCreated()); } else { rawParentFilter = parentObjectMapper.nestedTypeFilter(); } @@ -399,9 +405,9 @@ public class NestedQueryBuilder extends AbstractQueryBuilder LeafReaderContext ctx = searcher().getIndexReader().leaves().get(readerIndex); Query childFilter = childObjectMapper.nestedTypeFilter(); - BitSetProducer parentFilter = context.bitsetFilterCache().getBitSetProducer(rawParentFilter); + BitSetProducer parentFilter = bitsetFilterCache().getBitSetProducer(rawParentFilter); Query q = new ParentChildrenBlockJoinQuery(parentFilter, childFilter, parentDocId); - Weight weight = context.searcher().createWeight(context.searcher().rewrite(q), + Weight weight = searcher().createWeight(searcher().rewrite(q), org.apache.lucene.search.ScoreMode.COMPLETE_NO_SCORES, 1f); if (size() == 0) { TotalHitCountCollector totalHitCountCollector = new TotalHitCountCollector(); @@ -409,7 +415,7 @@ public class NestedQueryBuilder extends AbstractQueryBuilder result[i] = new TopDocsAndMaxScore(new TopDocs(new TotalHits(totalHitCountCollector.getTotalHits(), TotalHits.Relation.EQUAL_TO), Lucene.EMPTY_SCORE_DOCS), Float.NaN); } else { - int topN = Math.min(from() + size(), context.searcher().getIndexReader().maxDoc()); + int topN = Math.min(from() + size(), searcher().getIndexReader().maxDoc()); TopDocsCollector topDocsCollector; MaxScoreCollector maxScoreCollector = null; if (sort() != 
null) { diff --git a/server/src/main/java/org/elasticsearch/index/query/QueryShardContext.java b/server/src/main/java/org/elasticsearch/index/query/QueryShardContext.java index c675aeff332..a631ea319b4 100644 --- a/server/src/main/java/org/elasticsearch/index/query/QueryShardContext.java +++ b/server/src/main/java/org/elasticsearch/index/query/QueryShardContext.java @@ -22,7 +22,6 @@ package org.elasticsearch.index.query; import org.apache.logging.log4j.LogManager; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.IndexReaderContext; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; import org.apache.lucene.search.join.BitSetProducer; @@ -37,6 +36,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.lucene.search.Queries; +import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.Index; @@ -68,7 +68,6 @@ import java.util.Map; import java.util.Set; import java.util.function.BiConsumer; import java.util.function.BiFunction; -import java.util.function.Function; import java.util.function.LongSupplier; import static java.util.Collections.unmodifiableMap; @@ -84,13 +83,13 @@ public class QueryShardContext extends QueryRewriteContext { private final ScriptService scriptService; private final IndexSettings indexSettings; + private final BigArrays bigArrays; private final MapperService mapperService; private final SimilarityService similarityService; private final BitsetFilterCache bitsetFilterCache; - private final Function searcherFactory; private final BiFunction> indexFieldDataService; private final int shardId; - private final IndexReader reader; + private final IndexSearcher searcher; private String[] types = Strings.EMPTY_ARRAY; private boolean cacheable = true; private final SetOnce frozen = new SetOnce<>(); @@ -109,42 +108,58 @@ public class QueryShardContext extends QueryRewriteContext { private boolean mapUnmappedFieldAsString; private NestedScope nestedScope; - public QueryShardContext(int shardId, IndexSettings indexSettings, BitsetFilterCache bitsetFilterCache, - Function searcherFactory, - BiFunction> indexFieldDataLookup, MapperService mapperService, - SimilarityService similarityService, ScriptService scriptService, NamedXContentRegistry xContentRegistry, - NamedWriteableRegistry namedWriteableRegistry, Client client, IndexReader reader, LongSupplier nowInMillis, - String clusterAlias) { - this(shardId, indexSettings, bitsetFilterCache, searcherFactory, indexFieldDataLookup, mapperService, similarityService, - scriptService, xContentRegistry, namedWriteableRegistry, client, reader, nowInMillis, + public QueryShardContext(int shardId, + IndexSettings indexSettings, + BigArrays bigArrays, + BitsetFilterCache bitsetFilterCache, + BiFunction> indexFieldDataLookup, + MapperService mapperService, + SimilarityService similarityService, + ScriptService scriptService, + NamedXContentRegistry xContentRegistry, + NamedWriteableRegistry namedWriteableRegistry, + Client client, + IndexSearcher searcher, + LongSupplier nowInMillis, + String clusterAlias) { + this(shardId, indexSettings, bigArrays, bitsetFilterCache, indexFieldDataLookup, mapperService, similarityService, + scriptService, xContentRegistry, 
namedWriteableRegistry, client, searcher, nowInMillis, new Index(RemoteClusterAware.buildRemoteIndexName(clusterAlias, indexSettings.getIndex().getName()), indexSettings.getIndex().getUUID())); } public QueryShardContext(QueryShardContext source) { - this(source.shardId, source.indexSettings, source.bitsetFilterCache, source.searcherFactory, source.indexFieldDataService, - source.mapperService, source.similarityService, source.scriptService, source.getXContentRegistry(), - source.getWriteableRegistry(), source.client, source.reader, source.nowInMillis, source.fullyQualifiedIndex); + this(source.shardId, source.indexSettings, source.bigArrays, source.bitsetFilterCache, source.indexFieldDataService, + source.mapperService, source.similarityService, source.scriptService, source.getXContentRegistry(), + source.getWriteableRegistry(), source.client, source.searcher, source.nowInMillis, source.fullyQualifiedIndex); } - private QueryShardContext(int shardId, IndexSettings indexSettings, BitsetFilterCache bitsetFilterCache, - Function searcherFactory, - BiFunction> indexFieldDataLookup, MapperService mapperService, - SimilarityService similarityService, ScriptService scriptService, NamedXContentRegistry xContentRegistry, - NamedWriteableRegistry namedWriteableRegistry, Client client, IndexReader reader, LongSupplier nowInMillis, - Index fullyQualifiedIndex) { + private QueryShardContext(int shardId, + IndexSettings indexSettings, + BigArrays bigArrays, + BitsetFilterCache bitsetFilterCache, + BiFunction> indexFieldDataLookup, + MapperService mapperService, + SimilarityService similarityService, + ScriptService scriptService, + NamedXContentRegistry xContentRegistry, + NamedWriteableRegistry namedWriteableRegistry, + Client client, + IndexSearcher searcher, + LongSupplier nowInMillis, + Index fullyQualifiedIndex) { super(xContentRegistry, namedWriteableRegistry, client, nowInMillis); this.shardId = shardId; this.similarityService = similarityService; this.mapperService = mapperService; + this.bigArrays = bigArrays; this.bitsetFilterCache = bitsetFilterCache; - this.searcherFactory = searcherFactory; this.indexFieldDataService = indexFieldDataLookup; this.allowUnmappedFields = indexSettings.isDefaultAllowUnmappedFields(); this.nestedScope = new NestedScope(); this.scriptService = scriptService; this.indexSettings = indexSettings; - this.reader = reader; + this.searcher = searcher; this.fullyQualifiedIndex = fullyQualifiedIndex; } @@ -183,10 +198,6 @@ public class QueryShardContext extends QueryRewriteContext { return bitsetFilterCache.getBitSetProducer(filter); } - public IndexSearcher newCachedSearcher(IndexReaderContext context) { - return searcherFactory.apply(context); - } - public > IFD getForField(MappedFieldType fieldType) { return (IFD) indexFieldDataService.apply(fieldType, fullyQualifiedIndex.getName()); } @@ -424,7 +435,13 @@ public class QueryShardContext extends QueryRewriteContext { /** Return the current {@link IndexReader}, or {@code null} if no index reader is available, * for instance if this rewrite context is used to index queries (percolation). */ public IndexReader getIndexReader() { - return reader; + return searcher == null ? null : searcher.getIndexReader(); + } + + /** Return the current {@link IndexSearcher}, or {@code null} if no index reader is available, + * for instance if this rewrite context is used to index queries (percolation). 
*/ + public IndexSearcher searcher() { + return searcher; } /** @@ -433,4 +450,11 @@ public class QueryShardContext extends QueryRewriteContext { public Index getFullyQualifiedIndex() { return fullyQualifiedIndex; } + + /** + * Return the {@link BigArrays} instance for this node. + */ + public BigArrays bigArrays() { + return bigArrays; + } } diff --git a/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java b/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java index 1ef7c27c517..fb7b135d5c6 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java @@ -383,22 +383,24 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L * @param retainingSequenceNumber the retaining sequence number * @param source the source of the retention lease * @return the renewed retention lease - * @throws RetentionLeaseNotFoundException if the specified retention lease does not exist + * @throws RetentionLeaseNotFoundException if the specified retention lease does not exist + * @throws RetentionLeaseInvalidRetainingSeqNoException if the new retaining sequence number is lower than + * the retaining sequence number of the current retention lease. */ public synchronized RetentionLease renewRetentionLease(final String id, final long retainingSequenceNumber, final String source) { assert primaryMode; - if (retentionLeases.contains(id) == false) { + final RetentionLease existingRetentionLease = retentionLeases.get(id); + if (existingRetentionLease == null) { throw new RetentionLeaseNotFoundException(id); } + if (retainingSequenceNumber < existingRetentionLease.retainingSequenceNumber()) { + assert PEER_RECOVERY_RETENTION_LEASE_SOURCE.equals(source) == false : + "renewing peer recovery retention lease [" + existingRetentionLease + "]" + + " with a lower retaining sequence number [" + retainingSequenceNumber + "]"; + throw new RetentionLeaseInvalidRetainingSeqNoException(id, source, retainingSequenceNumber, existingRetentionLease); + } final RetentionLease retentionLease = - new RetentionLease(id, retainingSequenceNumber, currentTimeMillisSupplier.getAsLong(), source); - final RetentionLease existingRetentionLease = retentionLeases.get(id); - assert existingRetentionLease != null; - assert existingRetentionLease.retainingSequenceNumber() <= retentionLease.retainingSequenceNumber() : - "retention lease renewal for [" + id + "]" - + " from [" + source + "]" - + " renewed a lower retaining sequence number [" + retentionLease.retainingSequenceNumber() + "]" - + " than the current lease retaining sequence number [" + existingRetentionLease.retainingSequenceNumber() + "]"; + new RetentionLease(id, retainingSequenceNumber, currentTimeMillisSupplier.getAsLong(), source); retentionLeases = new RetentionLeases( operationPrimaryTerm, retentionLeases.version() + 1, diff --git a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseInvalidRetainingSeqNoException.java b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseInvalidRetainingSeqNoException.java new file mode 100644 index 00000000000..7f4d34d4eca --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseInvalidRetainingSeqNoException.java @@ -0,0 +1,39 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
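// Editorial aside, not part of the patch: the renewal contract documented above (a lease may only be
// renewed with an equal or higher retaining sequence number, otherwise
// RetentionLeaseInvalidRetainingSeqNoException is thrown) reduces to the guard sketched below.
// A plain java.util.Map stands in for the real RetentionLeases bookkeeping; names are illustrative.
static void renewLease(java.util.Map<String, Long> retainingSeqNos, String id, long newRetainingSeqNo) {
    final Long existing = retainingSeqNos.get(id);
    if (existing == null) {
        // corresponds to RetentionLeaseNotFoundException
        throw new IllegalArgumentException("retention lease [" + id + "] not found");
    }
    if (newRetainingSeqNo < existing) {
        // corresponds to RetentionLeaseInvalidRetainingSeqNoException
        throw new IllegalArgumentException("cannot lower the retaining sequence number of [" + id
            + "] from [" + existing + "] to [" + newRetainingSeqNo + "]");
    }
    retainingSeqNos.put(id, newRetainingSeqNo);
}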
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.seqno; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.io.stream.StreamInput; + +import java.io.IOException; + +public class RetentionLeaseInvalidRetainingSeqNoException extends ElasticsearchException { + + RetentionLeaseInvalidRetainingSeqNoException(String retentionLeaseId, String source, long retainingSequenceNumber, + RetentionLease existingRetentionLease) { + super("the current retention lease with [" + retentionLeaseId + "]" + + " is retaining a higher sequence number [" + existingRetentionLease.retainingSequenceNumber() + "]" + + " than the new retaining sequence number [" + retainingSequenceNumber + "] from [" + source + "]"); + } + + public RetentionLeaseInvalidRetainingSeqNoException(StreamInput in) throws IOException { + super(in); + } +} diff --git a/server/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotStatus.java b/server/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotStatus.java index bfc3faae934..97cadce6430 100644 --- a/server/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotStatus.java +++ b/server/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotStatus.java @@ -111,21 +111,19 @@ public class IndexShardSnapshotStatus { return asCopy(); } - public synchronized Copy moveToDone(final long endTime) { + public synchronized void moveToDone(final long endTime) { if (stage.compareAndSet(Stage.FINALIZE, Stage.DONE)) { this.totalTime = Math.max(0L, endTime - startTime); } else { throw new IllegalStateException("Unable to move the shard snapshot status to [DONE]: " + "expecting [FINALIZE] but got [" + stage.get() + "]"); } - return asCopy(); } - public synchronized Copy abortIfNotCompleted(final String failure) { + public synchronized void abortIfNotCompleted(final String failure) { if (stage.compareAndSet(Stage.INIT, Stage.ABORTED) || stage.compareAndSet(Stage.STARTED, Stage.ABORTED)) { this.failure = failure; } - return asCopy(); } public synchronized void moveToFailed(final long endTime, final String failure) { diff --git a/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java b/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java index cc95300728e..4f03c02868a 100644 --- a/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java +++ b/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java @@ -388,9 +388,9 @@ public class SyncedFlushService implements IndexEventListener { if (preSyncedResponse.numDocs != numDocsOnPrimary && preSyncedResponse.numDocs != PreSyncedFlushResponse.UNKNOWN_NUM_DOCS && numDocsOnPrimary != PreSyncedFlushResponse.UNKNOWN_NUM_DOCS) { - logger.warn("{} can't to issue sync id [{}] for out of sync replica [{}] with num docs [{}]; 
num docs on primary [{}]", + logger.debug("{} can't issue sync id [{}] for replica [{}] with num docs [{}]; num docs on primary [{}]", shardId, syncId, shard, preSyncedResponse.numDocs, numDocsOnPrimary); - results.put(shard, new ShardSyncedFlushResponse("out of sync replica; " + + results.put(shard, new ShardSyncedFlushResponse("ongoing indexing operations: " + "num docs on replica [" + preSyncedResponse.numDocs + "]; num docs on primary [" + numDocsOnPrimary + "]")); countDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results); continue; diff --git a/server/src/main/java/org/elasticsearch/ingest/IngestService.java b/server/src/main/java/org/elasticsearch/ingest/IngestService.java index 0814e130802..2ba5214f80f 100644 --- a/server/src/main/java/org/elasticsearch/ingest/IngestService.java +++ b/server/src/main/java/org/elasticsearch/ingest/IngestService.java @@ -423,7 +423,7 @@ public class IngestService implements ClusterStateApplier { * Adds a listener that gets invoked with the current cluster state before processor factories * get invoked. * - * This is useful for components that are used by ingest processors, so that have the opportunity to update + * This is useful for components that are used by ingest processors, so that they have the opportunity to update * before these components get used by the ingest processor factory. */ public void addIngestClusterStateListener(Consumer listener) { diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index 2adbd162d18..5329f3c2526 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -158,7 +158,6 @@ import org.elasticsearch.usage.UsageService; import org.elasticsearch.watcher.ResourceWatcherService; import javax.net.ssl.SNIHostName; - import java.io.BufferedWriter; import java.io.Closeable; import java.io.IOException; @@ -484,8 +483,7 @@ public class Node implements Closeable { ).collect(Collectors.toSet()); final TransportService transportService = newTransportService(settings, transport, threadPool, networkModule.getTransportInterceptor(), localNodeFactory, settingsModule.getClusterSettings(), taskHeaders); - final GatewayMetaState gatewayMetaState = new GatewayMetaState(settings, metaStateService, - metaDataIndexUpgradeService, metaDataUpgrader, transportService, clusterService); + final GatewayMetaState gatewayMetaState = new GatewayMetaState(settings, metaStateService); final ResponseCollectorService responseCollectorService = new ResponseCollectorService(clusterService); final SearchTransportService searchTransportService = new SearchTransportService(transportService, SearchExecutionStatsCollector.makeWrapper(responseCollectorService)); @@ -700,14 +698,14 @@ public class Node implements Closeable { assert transportService.getLocalNode().equals(localNodeFactory.getNode()) : "transportService has a different local node than the factory provided"; injector.getInstance(PeerRecoverySourceService.class).start(); - final MetaData onDiskMetadata; + + // Load (and maybe upgrade) the metadata stored on disk + final GatewayMetaState gatewayMetaState = injector.getInstance(GatewayMetaState.class); + gatewayMetaState.start(transportService, clusterService, + injector.getInstance(MetaDataIndexUpgradeService.class), injector.getInstance(MetaDataUpgrader.class)); // we load the global state here (the persistent part of the cluster state stored on disk) to // pass it to 
the bootstrap checks to allow plugins to enforce certain preconditions based on the recovered state. - if (DiscoveryNode.isMasterNode(settings()) || DiscoveryNode.isDataNode(settings())) { - onDiskMetadata = injector.getInstance(GatewayMetaState.class).getMetaData(); - } else { - onDiskMetadata = MetaData.EMPTY_META_DATA; - } + final MetaData onDiskMetadata = gatewayMetaState.getPersistedState().getLastAcceptedState().metaData(); assert onDiskMetadata != null : "metadata is null but shouldn't"; // this is never null validateNodeBeforeAcceptingRequests(new BootstrapContext(environment, onDiskMetadata), transportService.boundAddress(), pluginsService.filterPlugins(Plugin.class).stream() diff --git a/server/src/main/java/org/elasticsearch/persistent/AllocatedPersistentTask.java b/server/src/main/java/org/elasticsearch/persistent/AllocatedPersistentTask.java index 54dcffab6e3..15fe23d58c7 100644 --- a/server/src/main/java/org/elasticsearch/persistent/AllocatedPersistentTask.java +++ b/server/src/main/java/org/elasticsearch/persistent/AllocatedPersistentTask.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.persistent; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.action.ActionListener; @@ -37,13 +38,13 @@ import java.util.function.Predicate; */ public class AllocatedPersistentTask extends CancellableTask { + private static final Logger logger = LogManager.getLogger(AllocatedPersistentTask.class); private final AtomicReference state; private volatile String persistentTaskId; private volatile long allocationId; private volatile @Nullable Exception failure; private volatile PersistentTasksService persistentTasksService; - private volatile Logger logger; private volatile TaskManager taskManager; public AllocatedPersistentTask(long id, String type, String action, String description, TaskId parentTask, @@ -85,10 +86,9 @@ public class AllocatedPersistentTask extends CancellableTask { return persistentTaskId; } - void init(PersistentTasksService persistentTasksService, TaskManager taskManager, Logger logger, String persistentTaskId, long - allocationId) { + protected void init(PersistentTasksService persistentTasksService, TaskManager taskManager, + String persistentTaskId, long allocationId) { this.persistentTasksService = persistentTasksService; - this.logger = logger; this.taskManager = taskManager; this.persistentTaskId = persistentTaskId; this.allocationId = allocationId; diff --git a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksNodeService.java b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksNodeService.java index 9b811a079ef..14ff29e1397 100644 --- a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksNodeService.java +++ b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksNodeService.java @@ -183,7 +183,7 @@ public class PersistentTasksNodeService implements ClusterStateListener { boolean processed = false; try { - task.init(persistentTasksService, taskManager, logger, taskInProgress.getId(), taskInProgress.getAllocationId()); + task.init(persistentTasksService, taskManager, taskInProgress.getId(), taskInProgress.getAllocationId()); logger.trace("Persistent task [{}] with id [{}] and allocation id [{}] was created", task.getAction(), task.getPersistentTaskId(), task.getAllocationId()); try { diff --git a/server/src/main/java/org/elasticsearch/repositories/FilterRepository.java 
b/server/src/main/java/org/elasticsearch/repositories/FilterRepository.java index 8c9eff06988..6d9cba05748 100644 --- a/server/src/main/java/org/elasticsearch/repositories/FilterRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/FilterRepository.java @@ -81,9 +81,9 @@ public class FilterRepository implements Repository { @Override public SnapshotInfo finalizeSnapshot(SnapshotId snapshotId, List indices, long startTime, String failure, int totalShards, List shardFailures, long repositoryStateId, boolean includeGlobalState, - Map userMetadata) { + MetaData metaData, Map userMetadata) { return in.finalizeSnapshot(snapshotId, indices, startTime, failure, totalShards, shardFailures, repositoryStateId, - includeGlobalState, userMetadata); + includeGlobalState, metaData, userMetadata); } @Override @@ -121,13 +121,11 @@ public class FilterRepository implements Repository { return in.isReadOnly(); } - @Override public void snapshotShard(Store store, MapperService mapperService, SnapshotId snapshotId, IndexId indexId, - IndexCommit snapshotIndexCommit, IndexShardSnapshotStatus snapshotStatus) { - in.snapshotShard(store, mapperService, snapshotId, indexId, snapshotIndexCommit, snapshotStatus); + IndexCommit snapshotIndexCommit, IndexShardSnapshotStatus snapshotStatus, ActionListener listener) { + in.snapshotShard(store, mapperService, snapshotId, indexId, snapshotIndexCommit, snapshotStatus, listener); } - @Override public void restoreShard(Store store, SnapshotId snapshotId, Version version, IndexId indexId, ShardId snapshotShardId, RecoveryState recoveryState) { diff --git a/server/src/main/java/org/elasticsearch/repositories/Repository.java b/server/src/main/java/org/elasticsearch/repositories/Repository.java index 1d828da344b..f83712249f7 100644 --- a/server/src/main/java/org/elasticsearch/repositories/Repository.java +++ b/server/src/main/java/org/elasticsearch/repositories/Repository.java @@ -49,9 +49,7 @@ import java.util.function.Function; *

 * To perform a snapshot:
 * <ul>
- * <li>Master calls {@link #initializeSnapshot(SnapshotId, List, org.elasticsearch.cluster.metadata.MetaData)}
- * with list of indices that will be included into the snapshot</li>
- * <li>Data nodes call {@link Repository#snapshotShard(Store, MapperService, SnapshotId, IndexId, IndexCommit, IndexShardSnapshotStatus)}
+ * <li>Data nodes call {@link Repository#snapshotShard}
 * for each shard</li>
 * <li>When all shard calls return master calls {@link #finalizeSnapshot} with possible list of failures</li>
 * </ul>
@@ -117,7 +115,11 @@ public interface Repository extends LifecycleComponent { * @param snapshotId snapshot id * @param indices list of indices to be snapshotted * @param metaData cluster metadata + * + * @deprecated this method is only used when taking snapshots in a mixed version cluster where a master node older than + * {@link org.elasticsearch.snapshots.SnapshotsService#NO_REPO_INITIALIZE_VERSION} is present. */ + @Deprecated void initializeSnapshot(SnapshotId snapshotId, List indices, MetaData metaData); /** @@ -137,7 +139,7 @@ public interface Repository extends LifecycleComponent { */ SnapshotInfo finalizeSnapshot(SnapshotId snapshotId, List indices, long startTime, String failure, int totalShards, List shardFailures, long repositoryStateId, boolean includeGlobalState, - Map userMetadata); + MetaData clusterMetaData, Map userMetadata); /** * Deletes snapshot @@ -191,27 +193,6 @@ public interface Repository extends LifecycleComponent { */ boolean isReadOnly(); - /** - * Creates a snapshot of the shard based on the index commit point. - *

- * The index commit point can be obtained by using {@link org.elasticsearch.index.engine.Engine#acquireLastIndexCommit} method. - * Repository implementations shouldn't release the snapshot index commit point. It is done by the method caller. - *

- * As snapshot process progresses, implementation of this method should update {@link IndexShardSnapshotStatus} object and check - * {@link IndexShardSnapshotStatus#isAborted()} to see if the snapshot process should be aborted. - * @param indexShard the shard to be snapshotted - * @param snapshotId snapshot id - * @param indexId id for the index being snapshotted - * @param snapshotIndexCommit commit point - * @param snapshotStatus snapshot status - * @deprecated use {@link #snapshotShard(Store, MapperService, SnapshotId, IndexId, IndexCommit, IndexShardSnapshotStatus)} instead - */ - @Deprecated - default void snapshotShard(IndexShard indexShard, SnapshotId snapshotId, IndexId indexId, IndexCommit snapshotIndexCommit, - IndexShardSnapshotStatus snapshotStatus) { - snapshotShard(indexShard.store(), indexShard.mapperService(), snapshotId, indexId, snapshotIndexCommit, snapshotStatus); - } - /** * Creates a snapshot of the shard based on the index commit point. *

@@ -226,9 +207,10 @@ public interface Repository extends LifecycleComponent { * @param indexId id for the index being snapshotted * @param snapshotIndexCommit commit point * @param snapshotStatus snapshot status + * @param listener listener invoked on completion */ void snapshotShard(Store store, MapperService mapperService, SnapshotId snapshotId, IndexId indexId, IndexCommit snapshotIndexCommit, - IndexShardSnapshotStatus snapshotStatus); + IndexShardSnapshotStatus snapshotStatus, ActionListener listener); /** * Restores snapshot of the shard. diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index 8a9c12f9f4c..b2db14a5c29 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -32,6 +32,7 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; +import org.elasticsearch.action.StepListener; import org.elasticsearch.action.support.GroupedActionListener; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; @@ -85,7 +86,6 @@ import org.elasticsearch.repositories.RepositoryCleanupResult; import org.elasticsearch.repositories.RepositoryData; import org.elasticsearch.repositories.RepositoryException; import org.elasticsearch.repositories.RepositoryVerificationException; -import org.elasticsearch.snapshots.InvalidSnapshotNameException; import org.elasticsearch.snapshots.SnapshotCreationException; import org.elasticsearch.snapshots.SnapshotException; import org.elasticsearch.snapshots.SnapshotId; @@ -108,6 +108,9 @@ import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Set; +import java.util.concurrent.Executor; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Function; import java.util.stream.Collectors; import static org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot.FileInfo.canonicalName; @@ -178,7 +181,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp private static final String TESTS_FILE = "tests-"; - private static final String METADATA_PREFIX = "meta-"; + public static final String METADATA_PREFIX = "meta-"; public static final String METADATA_NAME_FORMAT = METADATA_PREFIX + "%s.dat"; @@ -382,23 +385,13 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp @Override public void initializeSnapshot(SnapshotId snapshotId, List indices, MetaData clusterMetaData) { - if (isReadOnly()) { - throw new RepositoryException(metadata.name(), "cannot create snapshot in a readonly repository"); - } try { - final String snapshotName = snapshotId.getName(); - // check if the snapshot name already exists in the repository - final RepositoryData repositoryData = getRepositoryData(); - if (repositoryData.getSnapshotIds().stream().anyMatch(s -> s.getName().equals(snapshotName))) { - throw new InvalidSnapshotNameException(metadata.name(), snapshotId.getName(), "snapshot with the same name already exists"); - } - // Write Global MetaData - globalMetaDataFormat.write(clusterMetaData, blobContainer(), snapshotId.getUUID()); + globalMetaDataFormat.write(clusterMetaData, blobContainer(), snapshotId.getUUID(), true); // 
write the index metadata for each index in the snapshot for (IndexId index : indices) { - indexMetaDataFormat.write(clusterMetaData.index(index.getName()), indexContainer(index), snapshotId.getUUID()); + indexMetaDataFormat.write(clusterMetaData.index(index.getName()), indexContainer(index), snapshotId.getUUID(), true); } } catch (IOException ex) { throw new SnapshotCreationException(metadata.name(), snapshotId, ex); @@ -410,58 +403,97 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp if (isReadOnly()) { listener.onFailure(new RepositoryException(metadata.name(), "cannot delete snapshot from a readonly repository")); } else { - SnapshotInfo snapshot = null; try { - snapshot = getSnapshotInfo(snapshotId); - } catch (SnapshotMissingException ex) { - listener.onFailure(ex); - return; - } catch (IllegalStateException | SnapshotException | ElasticsearchParseException ex) { - logger.warn(() -> new ParameterizedMessage("cannot read snapshot file [{}]", snapshotId), ex); - } - // Delete snapshot from the index file, since it is the maintainer of truth of active snapshots - final RepositoryData updatedRepositoryData; - final Map foundIndices; - final Set rootBlobs; - try { - rootBlobs = blobContainer().listBlobs().keySet(); - final RepositoryData repositoryData = getRepositoryData(latestGeneration(rootBlobs)); - updatedRepositoryData = repositoryData.removeSnapshot(snapshotId); - // Cache the indices that were found before writing out the new index-N blob so that a stuck master will never - // delete an index that was created by another master node after writing this index-N blob. - - foundIndices = blobStore().blobContainer(indicesPath()).children(); - writeIndexGen(updatedRepositoryData, repositoryStateId); + final Map rootBlobs = blobContainer().listBlobs(); + final RepositoryData repositoryData = getRepositoryData(latestGeneration(rootBlobs.keySet())); + final Map foundIndices = blobStore().blobContainer(indicesPath()).children(); + doDeleteShardSnapshots(snapshotId, repositoryStateId, foundIndices, rootBlobs, repositoryData, listener); } catch (Exception ex) { listener.onFailure(new RepositoryException(metadata.name(), "failed to delete snapshot [" + snapshotId + "]", ex)); - return; } - final SnapshotInfo finalSnapshotInfo = snapshot; - final List snapMetaFilesToDelete = - Arrays.asList(snapshotFormat.blobName(snapshotId.getUUID()), globalMetaDataFormat.blobName(snapshotId.getUUID())); - try { - blobContainer().deleteBlobsIgnoringIfNotExists(snapMetaFilesToDelete); - } catch (IOException e) { - logger.warn(() -> new ParameterizedMessage("[{}] Unable to delete global metadata files", snapshotId), e); - } - final Map survivingIndices = updatedRepositoryData.getIndices(); - deleteIndices( - updatedRepositoryData, - Optional.ofNullable(finalSnapshotInfo) - .map(info -> info.indices().stream().filter(survivingIndices::containsKey) - .map(updatedRepositoryData::resolveIndexId).collect(Collectors.toList())) - .orElse(Collections.emptyList()), - snapshotId, - ActionListener.map(listener, v -> { - cleanupStaleIndices(foundIndices, survivingIndices.values().stream().map(IndexId::getId).collect(Collectors.toSet())); - cleanupStaleRootFiles( - staleRootBlobs(updatedRepositoryData, Sets.difference(rootBlobs, new HashSet<>(snapMetaFilesToDelete)))); - return null; - }) - ); } } + /** + * After updating the {@link RepositoryData} each of the shards directories is individually first moved to the next shard generation + * and then has all now unreferenced blobs in it deleted. 
+ * + * @param snapshotId SnapshotId to delete + * @param repositoryStateId Expected repository state id + * @param foundIndices All indices folders found in the repository before executing any writes to the repository during this + * delete operation + * @param rootBlobs All blobs found at the root of the repository before executing any writes to the repository during this + * delete operation + * @param repositoryData RepositoryData found the in the repository before executing this delete + * @param listener Listener to invoke once finished + */ + private void doDeleteShardSnapshots(SnapshotId snapshotId, long repositoryStateId, Map foundIndices, + Map rootBlobs, RepositoryData repositoryData, + ActionListener listener) throws IOException { + final RepositoryData updatedRepositoryData = repositoryData.removeSnapshot(snapshotId); + // Cache the indices that were found before writing out the new index-N blob so that a stuck master will never + // delete an index that was created by another master node after writing this index-N blob. + writeIndexGen(updatedRepositoryData, repositoryStateId); + SnapshotInfo snapshot = null; + try { + snapshot = getSnapshotInfo(snapshotId); + } catch (SnapshotMissingException ex) { + listener.onFailure(ex); + return; + } catch (IllegalStateException | SnapshotException | ElasticsearchParseException ex) { + logger.warn(() -> new ParameterizedMessage("cannot read snapshot file [{}]", snapshotId), ex); + } + final List snapMetaFilesToDelete = + Arrays.asList(snapshotFormat.blobName(snapshotId.getUUID()), globalMetaDataFormat.blobName(snapshotId.getUUID())); + try { + blobContainer().deleteBlobsIgnoringIfNotExists(snapMetaFilesToDelete); + } catch (IOException e) { + logger.warn(() -> new ParameterizedMessage("[{}] Unable to delete global metadata files", snapshotId), e); + } + final Map survivingIndices = updatedRepositoryData.getIndices(); + deleteIndices( + updatedRepositoryData, + Optional.ofNullable(snapshot).map(info -> info.indices().stream().filter(survivingIndices::containsKey) + .map(updatedRepositoryData::resolveIndexId).collect(Collectors.toList())).orElse(Collections.emptyList()), + snapshotId, + ActionListener.delegateFailure(listener, + (l, v) -> cleanupStaleBlobs(foundIndices, + Sets.difference(rootBlobs.keySet(), new HashSet<>(snapMetaFilesToDelete)).stream().collect( + Collectors.toMap(Function.identity(), rootBlobs::get)), + updatedRepositoryData, ActionListener.map(l, ignored -> null)))); + } + + /** + * Cleans up stale blobs directly under the repository root as well as all indices paths that aren't referenced by any existing + * snapshots. 
This method is only to be called directly after a new {@link RepositoryData} was written to the repository and with + * parameters {@code foundIndices}, {@code rootBlobs} + * + * @param foundIndices all indices blob containers found in the repository before {@code newRepoData} was written + * @param rootBlobs all blobs found directly under the repository root + * @param newRepoData new repository data that was just written + * @param listener listener to invoke with the combined {@link DeleteResult} of all blobs removed in this operation + */ + private void cleanupStaleBlobs(Map foundIndices, Map rootBlobs, + RepositoryData newRepoData, ActionListener listener) { + final GroupedActionListener groupedListener = new GroupedActionListener<>(ActionListener.wrap(deleteResults -> { + DeleteResult deleteResult = DeleteResult.ZERO; + for (DeleteResult result : deleteResults) { + deleteResult = deleteResult.add(result); + } + listener.onResponse(deleteResult); + }, listener::onFailure), 2); + + final Executor executor = threadPool.executor(ThreadPool.Names.SNAPSHOT); + executor.execute(ActionRunnable.wrap(groupedListener, l -> { + List deletedBlobs = cleanupStaleRootFiles(staleRootBlobs(newRepoData, rootBlobs.keySet())); + l.onResponse( + new DeleteResult(deletedBlobs.size(), deletedBlobs.stream().mapToLong(name -> rootBlobs.get(name).length()).sum())); + })); + + final Set survivingIndexIds = newRepoData.getIndices().values().stream().map(IndexId::getId).collect(Collectors.toSet()); + executor.execute(ActionRunnable.wrap(groupedListener, l -> l.onResponse(cleanupStaleIndices(foundIndices, survivingIndexIds)))); + } + /** * Runs cleanup actions on the repository. Increments the repository state id by one before executing any modifications on the * repository. 
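// Editorial aside, not part of the patch: cleanupStaleBlobs above submits the two cleanup steps
// (stale root blobs and stale index folders) to the SNAPSHOT thread pool and combines their
// DeleteResults once both have completed. The self-contained stand-in below shows that fan-out and
// combine shape with plain futures instead of GroupedActionListener; it is illustrative only.
static long[] cleanupConcurrently(java.util.concurrent.Callable<long[]> rootBlobCleanup,
                                  java.util.concurrent.Callable<long[]> indicesCleanup) throws Exception {
    final java.util.concurrent.ExecutorService pool = java.util.concurrent.Executors.newFixedThreadPool(2);
    try {
        // each task reports {blobsDeleted, bytesFreed}, the analogue of a DeleteResult
        final java.util.concurrent.Future<long[]> roots = pool.submit(rootBlobCleanup);
        final java.util.concurrent.Future<long[]> indices = pool.submit(indicesCleanup);
        final long[] a = roots.get();
        final long[] b = indices.get();
        return new long[] { a[0] + b[0], a[1] + b[1] };
    } finally {
        pool.shutdown();
    }
}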
@@ -474,7 +506,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp * @param listener Lister to complete when done */ public void cleanup(long repositoryStateId, ActionListener listener) { - ActionListener.completeWith(listener, () -> { + try { if (isReadOnly()) { throw new RepositoryException(metadata.name(), "cannot run cleanup on readonly repository"); } @@ -492,15 +524,15 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp final List staleRootBlobs = staleRootBlobs(repositoryData, rootBlobs.keySet()); if (survivingIndexIds.equals(foundIndices.keySet()) && staleRootBlobs.isEmpty()) { // Nothing to clean up we return - return new RepositoryCleanupResult(DeleteResult.ZERO); + listener.onResponse(new RepositoryCleanupResult(DeleteResult.ZERO)); + } else { + // write new index-N blob to ensure concurrent operations will fail + writeIndexGen(repositoryData, repositoryStateId); + cleanupStaleBlobs(foundIndices, rootBlobs, repositoryData, ActionListener.map(listener, RepositoryCleanupResult::new)); } - // write new index-N blob to ensure concurrent operations will fail - writeIndexGen(repositoryData, repositoryStateId); - final DeleteResult deleteIndicesResult = cleanupStaleIndices(foundIndices, survivingIndexIds); - List cleaned = cleanupStaleRootFiles(staleRootBlobs); - return new RepositoryCleanupResult( - deleteIndicesResult.add(cleaned.size(), cleaned.stream().mapToLong(name -> rootBlobs.get(name).length()).sum())); - }); + } catch (Exception e) { + listener.onFailure(e); + } } // Finds all blobs directly under the repository root path that are not referenced by the current RepositoryData @@ -579,6 +611,13 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp return deleteResult; } + /** + * @param repositoryData RepositoryData with the snapshot removed + * @param indices Indices to remove the snapshot from (should not contain indices that become completely unreferenced with the + * removal of this snapshot as those are cleaned up afterwards by {@link #cleanupStaleBlobs}) + * @param snapshotId SnapshotId to remove from all the given indices + * @param listener Listener to invoke when finished + */ private void deleteIndices(RepositoryData repositoryData, List indices, SnapshotId snapshotId, ActionListener listener) { if (indices.isEmpty()) { @@ -634,14 +673,34 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp final List shardFailures, final long repositoryStateId, final boolean includeGlobalState, + final MetaData clusterMetaData, final Map userMetadata) { SnapshotInfo blobStoreSnapshot = new SnapshotInfo(snapshotId, indices.stream().map(IndexId::getName).collect(Collectors.toList()), startTime, failure, threadPool.absoluteTimeInMillis(), totalShards, shardFailures, includeGlobalState, userMetadata); + + try { + // We ignore all FileAlreadyExistsException here since otherwise a master failover while in this method will + // mean that no snap-${uuid}.dat blob is ever written for this snapshot. This is safe because any updated version of the + // index or global metadata will be compatible with the segments written in this snapshot as well. 
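// Editorial aside, not part of the patch: the metadata writes in this method now take an explicit
// boolean (true in initializeSnapshot, false here in finalizeSnapshot) that decides whether an
// already-existing blob is an error. Roughly, that is the difference between a CREATE_NEW write and a
// tolerant overwrite, sketched below with java.nio rather than the repository's actual blob-store API.
static void writeBlob(java.nio.file.Path path, byte[] data, boolean failIfAlreadyExists) throws java.io.IOException {
    if (failIfAlreadyExists) {
        // fails with FileAlreadyExistsException, as wanted for a new index-N generation blob
        java.nio.file.Files.write(path, data,
            java.nio.file.StandardOpenOption.CREATE_NEW, java.nio.file.StandardOpenOption.WRITE);
    } else {
        // tolerates a blob already written by a previous master, as for the snapshot metadata here
        java.nio.file.Files.write(path, data, java.nio.file.StandardOpenOption.CREATE,
            java.nio.file.StandardOpenOption.TRUNCATE_EXISTING, java.nio.file.StandardOpenOption.WRITE);
    }
}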
+ // Failing on an already existing index-${repoGeneration} below ensures that the index.latest blob is not updated in a way that + // decrements the generation it points at + + // Write Global MetaData + globalMetaDataFormat.write(clusterMetaData, blobContainer(), snapshotId.getUUID(), false); + + // write the index metadata for each index in the snapshot + for (IndexId index : indices) { + indexMetaDataFormat.write(clusterMetaData.index(index.getName()), indexContainer(index), snapshotId.getUUID(), false); + } + } catch (IOException ex) { + throw new SnapshotException(metadata.name(), snapshotId, "failed to write metadata for snapshot", ex); + } + try { final RepositoryData updatedRepositoryData = getRepositoryData().addSnapshot(snapshotId, blobStoreSnapshot.state(), indices); - snapshotFormat.write(blobStoreSnapshot, blobContainer(), snapshotId.getUUID()); + snapshotFormat.write(blobStoreSnapshot, blobContainer(), snapshotId.getUUID(), false); writeIndexGen(updatedRepositoryData, repositoryStateId); } catch (FileAlreadyExistsException ex) { // if another master was elected and took over finalizing the snapshot, it is possible @@ -909,9 +968,15 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp @Override public void snapshotShard(Store store, MapperService mapperService, SnapshotId snapshotId, IndexId indexId, - IndexCommit snapshotIndexCommit, IndexShardSnapshotStatus snapshotStatus) { + IndexCommit snapshotIndexCommit, IndexShardSnapshotStatus snapshotStatus, ActionListener listener) { final ShardId shardId = store.shardId(); final long startTime = threadPool.absoluteTimeInMillis(); + final StepListener snapshotDoneListener = new StepListener<>(); + snapshotDoneListener.whenComplete(listener::onResponse, e -> { + snapshotStatus.moveToFailed(threadPool.absoluteTimeInMillis(), ExceptionsHelper.detailedMessage(e)); + listener.onFailure(e instanceof IndexShardSnapshotFailedException ? (IndexShardSnapshotFailedException) e + : new IndexShardSnapshotFailedException(store.shardId(), e)); + }); try { logger.debug("[{}] [{}] snapshot to [{}] ...", shardId, snapshotId, metadata.name()); @@ -933,132 +998,155 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp } final List indexCommitPointFiles = new ArrayList<>(); + ArrayList filesToSnapshot = new ArrayList<>(); store.incRef(); + final Collection fileNames; + final Store.MetadataSnapshot metadataFromStore; try { - ArrayList filesToSnapshot = new ArrayList<>(); - final Store.MetadataSnapshot metadata; // TODO apparently we don't use the MetadataSnapshot#.recoveryDiff(...) 
here but we should - final Collection fileNames; try { logger.trace( "[{}] [{}] Loading store metadata using index commit [{}]", shardId, snapshotId, snapshotIndexCommit); - metadata = store.getMetadata(snapshotIndexCommit); + metadataFromStore = store.getMetadata(snapshotIndexCommit); fileNames = snapshotIndexCommit.getFileNames(); } catch (IOException e) { throw new IndexShardSnapshotFailedException(shardId, "Failed to get store file metadata", e); } - int indexIncrementalFileCount = 0; - int indexTotalNumberOfFiles = 0; - long indexIncrementalSize = 0; - long indexTotalFileCount = 0; - for (String fileName : fileNames) { - if (snapshotStatus.isAborted()) { - logger.debug("[{}] [{}] Aborted on the file [{}], exiting", shardId, snapshotId, fileName); - throw new IndexShardSnapshotFailedException(shardId, "Aborted"); - } - - logger.trace("[{}] [{}] Processing [{}]", shardId, snapshotId, fileName); - final StoreFileMetaData md = metadata.get(fileName); - BlobStoreIndexShardSnapshot.FileInfo existingFileInfo = null; - List filesInfo = snapshots.findPhysicalIndexFiles(fileName); - if (filesInfo != null) { - for (BlobStoreIndexShardSnapshot.FileInfo fileInfo : filesInfo) { - if (fileInfo.isSame(md)) { - // a commit point file with the same name, size and checksum was already copied to repository - // we will reuse it for this snapshot - existingFileInfo = fileInfo; - break; - } - } - } - - indexTotalFileCount += md.length(); - indexTotalNumberOfFiles++; - - if (existingFileInfo == null) { - indexIncrementalFileCount++; - indexIncrementalSize += md.length(); - // create a new FileInfo - BlobStoreIndexShardSnapshot.FileInfo snapshotFileInfo = - new BlobStoreIndexShardSnapshot.FileInfo(DATA_BLOB_PREFIX + UUIDs.randomBase64UUID(), md, chunkSize()); - indexCommitPointFiles.add(snapshotFileInfo); - filesToSnapshot.add(snapshotFileInfo); - } else { - indexCommitPointFiles.add(existingFileInfo); - } - } - - snapshotStatus.moveToStarted(startTime, indexIncrementalFileCount, - indexTotalNumberOfFiles, indexIncrementalSize, indexTotalFileCount); - - for (BlobStoreIndexShardSnapshot.FileInfo snapshotFileInfo : filesToSnapshot) { - try { - snapshotFile(snapshotFileInfo, indexId, shardId, snapshotId, snapshotStatus, store); - } catch (IOException e) { - throw new IndexShardSnapshotFailedException(shardId, "Failed to perform snapshot (index files)", e); - } - } } finally { store.decRef(); } + int indexIncrementalFileCount = 0; + int indexTotalNumberOfFiles = 0; + long indexIncrementalSize = 0; + long indexTotalFileCount = 0; + for (String fileName : fileNames) { + if (snapshotStatus.isAborted()) { + logger.debug("[{}] [{}] Aborted on the file [{}], exiting", shardId, snapshotId, fileName); + throw new IndexShardSnapshotFailedException(shardId, "Aborted"); + } - final IndexShardSnapshotStatus.Copy lastSnapshotStatus = snapshotStatus.moveToFinalize(snapshotIndexCommit.getGeneration()); + logger.trace("[{}] [{}] Processing [{}]", shardId, snapshotId, fileName); + final StoreFileMetaData md = metadataFromStore.get(fileName); + BlobStoreIndexShardSnapshot.FileInfo existingFileInfo = null; + List filesInfo = snapshots.findPhysicalIndexFiles(fileName); + if (filesInfo != null) { + for (BlobStoreIndexShardSnapshot.FileInfo fileInfo : filesInfo) { + if (fileInfo.isSame(md)) { + // a commit point file with the same name, size and checksum was already copied to repository + // we will reuse it for this snapshot + existingFileInfo = fileInfo; + break; + } + } + } - // now create and write the commit point - final 
BlobStoreIndexShardSnapshot snapshot = new BlobStoreIndexShardSnapshot(snapshotId.getName(), - lastSnapshotStatus.getIndexVersion(), - indexCommitPointFiles, - lastSnapshotStatus.getStartTime(), - threadPool.absoluteTimeInMillis() - lastSnapshotStatus.getStartTime(), - lastSnapshotStatus.getIncrementalFileCount(), - lastSnapshotStatus.getIncrementalSize() - ); + indexTotalFileCount += md.length(); + indexTotalNumberOfFiles++; - logger.trace("[{}] [{}] writing shard snapshot file", shardId, snapshotId); - try { - indexShardSnapshotFormat.write(snapshot, shardContainer, snapshotId.getUUID()); - } catch (IOException e) { - throw new IndexShardSnapshotFailedException(shardId, "Failed to write commit point", e); + if (existingFileInfo == null) { + indexIncrementalFileCount++; + indexIncrementalSize += md.length(); + // create a new FileInfo + BlobStoreIndexShardSnapshot.FileInfo snapshotFileInfo = + new BlobStoreIndexShardSnapshot.FileInfo(DATA_BLOB_PREFIX + UUIDs.randomBase64UUID(), md, chunkSize()); + indexCommitPointFiles.add(snapshotFileInfo); + filesToSnapshot.add(snapshotFileInfo); + } else { + indexCommitPointFiles.add(existingFileInfo); + } } - // delete all files that are not referenced by any commit point - // build a new BlobStoreIndexShardSnapshot, that includes this one and all the saved ones - List newSnapshotsList = new ArrayList<>(); - newSnapshotsList.add(new SnapshotFiles(snapshot.snapshot(), snapshot.indexFiles())); - for (SnapshotFiles point : snapshots) { - newSnapshotsList.add(point); + snapshotStatus.moveToStarted(startTime, indexIncrementalFileCount, + indexTotalNumberOfFiles, indexIncrementalSize, indexTotalFileCount); + + assert indexIncrementalFileCount == filesToSnapshot.size(); + + final StepListener> allFilesUploadedListener = new StepListener<>(); + allFilesUploadedListener.whenComplete(v -> { + final IndexShardSnapshotStatus.Copy lastSnapshotStatus = + snapshotStatus.moveToFinalize(snapshotIndexCommit.getGeneration()); + + // now create and write the commit point + final BlobStoreIndexShardSnapshot snapshot = new BlobStoreIndexShardSnapshot(snapshotId.getName(), + lastSnapshotStatus.getIndexVersion(), + indexCommitPointFiles, + lastSnapshotStatus.getStartTime(), + threadPool.absoluteTimeInMillis() - lastSnapshotStatus.getStartTime(), + lastSnapshotStatus.getIncrementalFileCount(), + lastSnapshotStatus.getIncrementalSize() + ); + + logger.trace("[{}] [{}] writing shard snapshot file", shardId, snapshotId); + try { + indexShardSnapshotFormat.write(snapshot, shardContainer, snapshotId.getUUID(), false); + } catch (IOException e) { + throw new IndexShardSnapshotFailedException(shardId, "Failed to write commit point", e); + } + // delete all files that are not referenced by any commit point + // build a new BlobStoreIndexShardSnapshot, that includes this one and all the saved ones + List newSnapshotsList = new ArrayList<>(); + newSnapshotsList.add(new SnapshotFiles(snapshot.snapshot(), snapshot.indexFiles())); + for (SnapshotFiles point : snapshots) { + newSnapshotsList.add(point); + } + final String indexGeneration = Long.toString(fileListGeneration + 1); + final List blobsToDelete; + try { + final BlobStoreIndexShardSnapshots updatedSnapshots = new BlobStoreIndexShardSnapshots(newSnapshotsList); + indexShardSnapshotsFormat.writeAtomic(updatedSnapshots, shardContainer, indexGeneration); + // Delete all previous index-N blobs + blobsToDelete = + blobs.keySet().stream().filter(blob -> blob.startsWith(SNAPSHOT_INDEX_PREFIX)).collect(Collectors.toList()); + assert 
blobsToDelete.stream().mapToLong(b -> Long.parseLong(b.replaceFirst(SNAPSHOT_INDEX_PREFIX, ""))) + .max().orElse(-1L) < Long.parseLong(indexGeneration) + : "Tried to delete an index-N blob newer than the current generation [" + indexGeneration + + "] when deleting index-N blobs " + blobsToDelete; + } catch (IOException e) { + throw new IndexShardSnapshotFailedException(shardId, + "Failed to finalize snapshot creation [" + snapshotId + "] with shard index [" + + indexShardSnapshotsFormat.blobName(indexGeneration) + "]", e); + } + try { + shardContainer.deleteBlobsIgnoringIfNotExists(blobsToDelete); + } catch (IOException e) { + logger.warn(() -> new ParameterizedMessage("[{}][{}] failed to delete old index-N blobs during finalization", + snapshotId, shardId), e); + } + snapshotStatus.moveToDone(threadPool.absoluteTimeInMillis()); + snapshotDoneListener.onResponse(null); + }, snapshotDoneListener::onFailure); + if (indexIncrementalFileCount == 0) { + allFilesUploadedListener.onResponse(Collections.emptyList()); + return; } - final String indexGeneration = Long.toString(fileListGeneration + 1); - final List blobsToDelete; - try { - final BlobStoreIndexShardSnapshots updatedSnapshots = new BlobStoreIndexShardSnapshots(newSnapshotsList); - indexShardSnapshotsFormat.writeAtomic(updatedSnapshots, shardContainer, indexGeneration); - // Delete all previous index-N blobs - blobsToDelete = - blobs.keySet().stream().filter(blob -> blob.startsWith(SNAPSHOT_INDEX_PREFIX)).collect(Collectors.toList()); - assert blobsToDelete.stream().mapToLong(b -> Long.parseLong(b.replaceFirst(SNAPSHOT_INDEX_PREFIX, ""))) - .max().orElse(-1L) < Long.parseLong(indexGeneration) - : "Tried to delete an index-N blob newer than the current generation [" + indexGeneration + "] when deleting index-N" + - " blobs " + blobsToDelete; - } catch (IOException e) { - throw new IndexShardSnapshotFailedException(shardId, - "Failed to finalize snapshot creation [" + snapshotId + "] with shard index [" - + indexShardSnapshotsFormat.blobName(indexGeneration) + "]", e); + final GroupedActionListener filesListener = + new GroupedActionListener<>(allFilesUploadedListener, indexIncrementalFileCount); + final Executor executor = threadPool.executor(ThreadPool.Names.SNAPSHOT); + // Flag to signal that the snapshot has been aborted/failed so we can stop any further blob uploads from starting + final AtomicBoolean alreadyFailed = new AtomicBoolean(); + for (BlobStoreIndexShardSnapshot.FileInfo snapshotFileInfo : filesToSnapshot) { + executor.execute(new ActionRunnable(filesListener) { + @Override + protected void doRun() { + try { + if (alreadyFailed.get() == false) { + snapshotFile(snapshotFileInfo, indexId, shardId, snapshotId, snapshotStatus, store); + } + filesListener.onResponse(null); + } catch (IOException e) { + throw new IndexShardSnapshotFailedException(shardId, "Failed to perform snapshot (index files)", e); + } + } + + @Override + public void onFailure(Exception e) { + alreadyFailed.set(true); + super.onFailure(e); + } + }); } - try { - shardContainer.deleteBlobsIgnoringIfNotExists(blobsToDelete); - } catch (IOException e) { - logger.warn(() -> new ParameterizedMessage("[{}][{}] failed to delete old index-N blobs during finalization", - snapshotId, shardId), e); - } - snapshotStatus.moveToDone(threadPool.absoluteTimeInMillis()); } catch (Exception e) { - snapshotStatus.moveToFailed(threadPool.absoluteTimeInMillis(), ExceptionsHelper.detailedMessage(e)); - if (e instanceof IndexShardSnapshotFailedException) { - throw 
(IndexShardSnapshotFailedException) e; - } else { - throw new IndexShardSnapshotFailedException(store.shardId(), e); - } + snapshotDoneListener.onFailure(e); } } @@ -1178,14 +1266,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp .collect(Collectors.toSet()); final BlobStoreIndexShardSnapshots updatedSnapshots = new BlobStoreIndexShardSnapshots(newSnapshotsList); indexShardSnapshotsFormat.writeAtomic(updatedSnapshots, shardContainer, indexGeneration); - // Delete all previous index-N, data- and meta-blobs and that are not referenced by the new index-N and temporary blobs - blobsToDelete = blobs.keySet().stream().filter(blob -> - blob.startsWith(SNAPSHOT_INDEX_PREFIX) - || (blob.startsWith(SNAPSHOT_PREFIX) && blob.endsWith(".dat") - && survivingSnapshotUUIDs.contains( - blob.substring(SNAPSHOT_PREFIX.length(), blob.length() - ".dat".length())) == false) - || (blob.startsWith(DATA_BLOB_PREFIX) && updatedSnapshots.findNameFile(canonicalName(blob)) == null) - || FsBlobContainer.isTempBlobName(blob)).collect(Collectors.toList()); + blobsToDelete = unusedBlobs(blobs, survivingSnapshotUUIDs, updatedSnapshots); } try { shardContainer.deleteBlobsIgnoringIfNotExists(blobsToDelete); @@ -1200,6 +1281,20 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp } } + // Unused blobs are all previous index-, data- and meta-blobs and that are not referenced by the new index- as well as all + // temporary blobs + private static List unusedBlobs(Map blobs, Set survivingSnapshotUUIDs, + BlobStoreIndexShardSnapshots updatedSnapshots) { + return blobs.keySet().stream().filter(blob -> + blob.startsWith(SNAPSHOT_INDEX_PREFIX) + || (blob.startsWith(SNAPSHOT_PREFIX) && blob.endsWith(".dat") + && survivingSnapshotUUIDs.contains( + blob.substring(SNAPSHOT_PREFIX.length(), blob.length() - ".dat".length())) == false) + || (blob.startsWith(DATA_BLOB_PREFIX) && updatedSnapshots.findNameFile(canonicalName(blob)) == null) + || FsBlobContainer.isTempBlobName(blob)).collect(Collectors.toList()); + } + + /** * Loads information about shard snapshot */ @@ -1245,6 +1340,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp IndexShardSnapshotStatus snapshotStatus, Store store) throws IOException { final BlobContainer shardContainer = shardContainer(indexId, shardId); final String file = fileInfo.physicalName(); + store.incRef(); try (IndexInput indexInput = store.openVerifyingInput(file, IOContext.READONCE, fileInfo.metadata())) { for (int i = 0; i < fileInfo.numberOfParts(); i++) { final long partBytes = fileInfo.partBytes(i); @@ -1284,6 +1380,8 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp failStoreIfCorrupted(store, t); snapshotStatus.addProcessedFile(0); throw t; + } finally { + store.decRef(); } } @@ -1297,31 +1395,4 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp } } } - - /** - * Checks if snapshot file already exists in the list of blobs - * @param fileInfo file to check - * @param blobs list of blobs - * @return true if file exists in the list of blobs - */ - private static boolean snapshotFileExistsInBlobs(BlobStoreIndexShardSnapshot.FileInfo fileInfo, Map blobs) { - BlobMetaData blobMetaData = blobs.get(fileInfo.name()); - if (blobMetaData != null) { - return blobMetaData.length() == fileInfo.length(); - } else if (blobs.containsKey(fileInfo.partName(0))) { - // multi part file sum up the size and check - int part = 0; - long totalSize = 0; - while 
(true) { - blobMetaData = blobs.get(fileInfo.partName(part++)); - if (blobMetaData == null) { - break; - } - totalSize += blobMetaData.length(); - } - return totalSize == fileInfo.length(); - } - // no file, not exact and not multipart - return false; - } } diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/ChecksumBlobStoreFormat.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/ChecksumBlobStoreFormat.java index 605dcae6489..9c7c7559fcb 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/ChecksumBlobStoreFormat.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/ChecksumBlobStoreFormat.java @@ -175,15 +175,16 @@ public final class ChecksumBlobStoreFormat { *

* The blob will be compressed and checksum will be written if required. * - * @param obj object to be serialized - * @param blobContainer blob container - * @param name blob name + * @param obj object to be serialized + * @param blobContainer blob container + * @param name blob name + * @param failIfAlreadyExists Whether to fail if the blob already exists */ - public void write(T obj, BlobContainer blobContainer, String name) throws IOException { + public void write(T obj, BlobContainer blobContainer, String name, boolean failIfAlreadyExists) throws IOException { final String blobName = blobName(name); writeTo(obj, blobName, bytesArray -> { try (InputStream stream = bytesArray.streamInput()) { - blobContainer.writeBlob(blobName, stream, bytesArray.length(), true); + blobContainer.writeBlob(blobName, stream, bytesArray.length(), failIfAlreadyExists); } }); } diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/package-info.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/package-info.java new file mode 100644 index 00000000000..5cc98f6c3e9 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/package-info.java @@ -0,0 +1,213 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/** + *

This package exposes the blobstore repository used by Elasticsearch Snapshots.

+ * + *

Preliminaries

+ * + *

The {@link org.elasticsearch.repositories.blobstore.BlobStoreRepository} forms the basis of implementations of + * {@link org.elasticsearch.repositories.Repository} on top of a blob store. A blobstore can be used as the basis for an implementation + * as long as it provides for GET, PUT, DELETE, and LIST operations. For a read-only repository, it suffices if the blobstore provides only + * GET operations. + * These operations are formally defined as specified by the {@link org.elasticsearch.common.blobstore.BlobContainer} interface that + * any {@code BlobStoreRepository} implementation must provide via its implementation of + * {@link org.elasticsearch.repositories.blobstore.BlobStoreRepository#getBlobContainer()}.
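To make that contract concrete, here is a minimal sketch of the four operations such a blob store has to provide. The interface name and signatures below are illustrative simplifications for this document, not the actual {@code org.elasticsearch.common.blobstore.BlobContainer} API:

```java
import java.io.IOException;
import java.io.InputStream;
import java.util.List;
import java.util.Map;

/**
 * Illustrative only: the minimal set of operations a blob store must offer to back a
 * snapshot repository. Names and signatures are simplified and do not mirror the real
 * org.elasticsearch.common.blobstore.BlobContainer interface.
 */
interface MinimalBlobContainer {
    /** GET: stream the contents of the named blob. */
    InputStream readBlob(String blobName) throws IOException;

    /** PUT: write a blob, optionally failing if a blob of that name already exists. */
    void writeBlob(String blobName, InputStream data, long length, boolean failIfAlreadyExists) throws IOException;

    /** DELETE: remove the given blobs, ignoring those that are already gone. */
    void deleteBlobsIgnoringIfNotExists(List<String> blobNames) throws IOException;

    /** LIST: map of blob name to blob length for all blobs whose name starts with the given prefix. */
    Map<String, Long> listBlobsByPrefix(String prefix) throws IOException;
}
```

All of the snapshot, metadata and segment blobs described in the rest of this document are read and written exclusively through these four primitives.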

+ * + *

The blob store is written to and read from by master-eligible nodes and data nodes. All metadata related to a snapshot's + * scope and health is written by the master node.

+ *

The data-nodes, on the other hand, write the data for each individual shard but do not write any blobs outside of the shard directories + * of the shards for which they hold the primary. For each shard, the data-node holding the shard's primary writes the actual data, in the form of + * the shard's segment files, to the repository, as well as metadata about all the segment files that the repository stores for the shard.

+ * + *

For the specifics on how the operations on the repository documented below are invoked during the snapshot process, please refer to + * the documentation of the {@link org.elasticsearch.snapshots} package.

+ * + *

{@code BlobStoreRepository} maintains the following structure of blobs containing data and metadata in the blob store. The exact + * operations executed on these blobs are explained below.

+ *
+ * {@code
+ *   STORE_ROOT
+ *   |- index-N           - JSON serialized {@link org.elasticsearch.repositories.RepositoryData} containing a list of all snapshot ids
+ *   |                      and the indices belonging to each snapshot, N is the generation of the file
+ *   |- index.latest      - contains the numeric value of the latest generation of the index file (i.e. N from above)
+ *   |- incompatible-snapshots - list of all snapshot ids that are no longer compatible with the current version of the cluster
+ *   |- snap-20131010.dat - SMILE serialized {@link org.elasticsearch.snapshots.SnapshotInfo} for snapshot "20131010"
+ *   |- meta-20131010.dat - SMILE serialized {@link org.elasticsearch.cluster.metadata.MetaData} for snapshot "20131010"
+ *   |                      (includes only global metadata)
+ *   |- snap-20131011.dat - SMILE serialized {@link org.elasticsearch.snapshots.SnapshotInfo} for snapshot "20131011"
+ *   |- meta-20131011.dat - SMILE serialized {@link org.elasticsearch.cluster.metadata.MetaData} for snapshot "20131011"
+ *   .....
+ *   |- indices/ - data for all indices
+ *      |- Ac1342-B_x/ - data for index "foo" which was assigned the unique id Ac1342-B_x (not to be confused with the actual index uuid)
+ *      |  |             in the repository
+ *      |  |- meta-20131010.dat - JSON Serialized {@link org.elasticsearch.cluster.metadata.IndexMetaData} for index "foo"
+ *      |  |- 0/ - data for shard "0" of index "foo"
+ *      |  |  |- __1                      \  (files with numeric names were created by older ES versions)
+ *      |  |  |- __2                      |
+ *      |  |  |- __VPO5oDMVT5y4Akv8T_AO_A |- files from different segments see snap-* for their mappings to real segment files
+ *      |  |  |- __1gbJy18wS_2kv1qI7FgKuQ |
+ *      |  |  |- __R8JvZAHlSMyMXyZc2SS8Zg /
+ *      |  |  .....
+ *      |  |  |- snap-20131010.dat - SMILE serialized {@link org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot} for
+ *      |  |  |                      snapshot "20131010"
+ *      |  |  |- snap-20131011.dat - SMILE serialized {@link org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot} for
+ *      |  |  |                      snapshot "20131011"
+ *      |  |  |- index-123         - SMILE serialized {@link org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshots} for
+ *      |  |  |                      the shard
+ *      |  |
+ *      |  |- 1/ - data for shard "1" of index "foo"
+ *      |  |  |- __1
+ *      |  |  .....
+ *      |  |
+ *      |  |-2/
+ *      |  ......
+ *      |
+ *      |- 1xB0D8_B3y/ - data for index "bar" which was assigned the unique id of 1xB0D8_B3y in the repository
+ *      ......
+ * }
+ * 
+ * + *

Getting the Repository's RepositoryData

+ * + *

Loading the {@link org.elasticsearch.repositories.RepositoryData} that holds the list of all snapshots as well as the mapping of + * indices' names to their repository {@link org.elasticsearch.repositories.IndexId} is done by invoking + * {@link org.elasticsearch.repositories.blobstore.BlobStoreRepository#getRepositoryData} and is implemented as follows (a sketch of the + * generation lookup is shown after this list):

+ *
    + *
+ *
+ * <ol>
+ *     <li>
+ *     <ol>
+ *         <li>The blobstore repository stores the {@code RepositoryData} in blobs named with incrementing suffix {@code N} at
+ *         {@code /index-N} directly under the repository's root.</li>
+ *         <li>The blobstore also stores the most recent {@code N} as a 64bit long in the blob {@code /index.latest} directly under the
+ *         repository's root.</li>
+ *     </ol>
+ *     </li>
+ *     <li>
+ *     <ol>
+ *         <li>First, find the most recent {@code RepositoryData} by getting a list of all index-N blobs through listing all blobs with
+ *         prefix "index-" under the repository root and then selecting the one with the highest value for N.</li>
+ *         <li>If this operation fails because the repository's {@code BlobContainer} does not support list operations (in the case of
+ *         read-only repositories), read the highest value of N from the index.latest blob.</li>
+ *     </ol>
+ *     </li>
+ *     <li>
+ *     <ol>
+ *         <li>Use the just determined value of {@code N} and get the {@code /index-N} blob and deserialize the {@code RepositoryData}
+ *         from it.</li>
+ *         <li>If no value of {@code N} could be found since neither an {@code index.latest} nor any {@code index-N} blobs exist in the
+ *         repository, it is assumed to be empty and {@link org.elasticsearch.repositories.RepositoryData#EMPTY} is returned.</li>
+ *     </ol>
+ *     </li>
+ * </ol>
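As a rough illustration of the lookup above, the sketch below resolves the current generation {@code N} against the hypothetical {@code MinimalBlobContainer} from the Preliminaries sketch. The fallback exception type and the sentinel value for an empty repository are assumptions made for this example only:

```java
import java.io.DataInputStream;
import java.io.IOException;
import java.io.InputStream;

final class LatestGenerationResolver {
    /** Assumed sentinel for "no index-N blob and no index.latest blob found". */
    static final long EMPTY_REPO_GENERATION = -1L;

    static long latestGeneration(MinimalBlobContainer repositoryRoot) throws IOException {
        try {
            // Preferred path: list every blob whose name starts with "index-" and keep the highest N.
            return repositoryRoot.listBlobsByPrefix("index-").keySet().stream()
                .mapToLong(blobName -> Long.parseLong(blobName.substring("index-".length())))
                .max()
                .orElse(EMPTY_REPO_GENERATION);
        } catch (UnsupportedOperationException e) {
            // Assumed failure mode for containers that cannot list (e.g. read-only repositories):
            // fall back to the index.latest blob, which stores the most recent N as a 64bit long.
            try (InputStream in = repositoryRoot.readBlob("index.latest");
                 DataInputStream data = new DataInputStream(in)) {
                return data.readLong();
            }
        }
    }
}
```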
+ *

Creating a Snapshot

+ * + *

Creating a snapshot in the repository happens in the three steps described in detail below.

+ * + *

Initializing a Snapshot in the Repository (Mixed Version Clusters only)

+ * + *

In mixed version clusters that contain a node older than + * {@link org.elasticsearch.snapshots.SnapshotsService#NO_REPO_INITIALIZE_VERSION}, creating a snapshot in the repository starts with a + * call to {@link org.elasticsearch.repositories.Repository#initializeSnapshot}, which the blob store repository implements via the + * following actions:

+ *
    + *
+ * <ol>
+ *     <li>Verify that no snapshot by the requested name exists.</li>
+ *     <li>Write a blob containing the cluster metadata to the root of the blob store repository at {@code /meta-${snapshot-uuid}.dat}</li>
+ *     <li>Write the metadata for each index to a blob in that index's directory at
+ *     {@code /indices/${index-snapshot-uuid}/meta-${snapshot-uuid}.dat}</li>
+ * </ol>
+ * TODO: Remove this section once BwC logic it references is removed + * + *

Writing Shard Data (Segments)

+ * + *

Once all the metadata has been written by the snapshot initialization, the snapshot process moves on to writing the actual shard data + * to the repository by invoking {@link org.elasticsearch.repositories.Repository#snapshotShard} on the data-nodes that hold the primaries + * for the shards in the current snapshot. It is implemented as follows:

+ * + *

Note:

+ *
    + *
+ * <ul>
+ *     <li>For each shard {@code i} in a given index, its path in the blob store is located at
+ *     {@code /indices/${index-snapshot-uuid}/${i}}</li>
+ *     <li>All the following steps are executed exclusively on the shard's primary's data node.</li>
+ * </ul>
+ * + *
    + *
+ * <ol>
+ *     <li>Create the {@link org.apache.lucene.index.IndexCommit} for the shard to snapshot.</li>
+ *     <li>List all blobs in the shard's path. Find the {@link org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshots}
+ *     blob with name {@code index-${N}} for the highest possible value of {@code N} in the list to get the information of what segment
+ *     files are already available in the blobstore.</li>
+ *     <li>By comparing the files in the {@code IndexCommit} and the available file list from the previous step, determine the segment
+ *     files that need to be written to the blob store. For each segment that needs to be added to the blob store, generate a unique name
+ *     by combining the segment data blob prefix {@code __} and a UUID and write the segment to the blobstore (a sketch of this
+ *     incremental file selection is shown after this list).</li>
+ *     <li>After completing all segment writes, a blob containing a
+ *     {@link org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot} with name {@code snap-${snapshot-uuid}.dat} is
+ *     written to the shard's path and contains a list of all the files referenced by the snapshot as well as some metadata about the
+ *     snapshot. See the documentation of {@code BlobStoreIndexShardSnapshot} for details on its contents.</li>
+ *     <li>Once all the segments and the {@code BlobStoreIndexShardSnapshot} blob have been written, an updated
+ *     {@code BlobStoreIndexShardSnapshots} blob is written to the shard's path with name {@code index-${N+1}}.</li>
+ * </ol>
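The incremental file selection from step 3 can be sketched as follows. {@code FileMeta} and the map of files already in the repository stand in for {@code StoreFileMetaData} and {@code BlobStoreIndexShardSnapshots}; the whole class is a simplified illustration rather than the code in {@code BlobStoreRepository#snapshotShard}:

```java
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.UUID;

final class IncrementalFileSelector {
    /** Simplified stand-in for StoreFileMetaData: physical segment file name, length and checksum. */
    record FileMeta(String physicalName, long length, String checksum) {}

    /** A segment file paired with the unique repository blob name it will be stored under. */
    record ShardBlob(String blobName, FileMeta meta) {}

    /**
     * Compare the files of the shard's IndexCommit against the files the repository already holds
     * and return only the ones that still have to be uploaded for this snapshot.
     */
    static List<ShardBlob> filesToUpload(List<FileMeta> commitFiles, Map<String, FileMeta> filesAlreadyInRepository) {
        List<ShardBlob> toUpload = new ArrayList<>();
        for (FileMeta file : commitFiles) {
            FileMeta existing = filesAlreadyInRepository.get(file.physicalName());
            boolean sameAsExisting = existing != null
                && existing.length() == file.length()
                && existing.checksum().equals(file.checksum());
            if (sameAsExisting == false) {
                // New or changed segment file: assign it a unique name __<UUID> and mark it for upload.
                toUpload.add(new ShardBlob("__" + UUID.randomUUID(), file));
            }
        }
        return toUpload;
    }
}
```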
+ * + *

Finalizing the Snapshot

+ * + *

After all primaries have finished writing the necessary segment files to the blob store in the previous step, the master node moves on + * to finalizing the snapshot by invoking {@link org.elasticsearch.repositories.Repository#finalizeSnapshot}. This method executes the + * following actions in order:

+ *
    + *
+ * <ol>
+ *     <li>Write a blob containing the cluster metadata to the root of the blob store repository at {@code /meta-${snapshot-uuid}.dat}</li>
+ *     <li>Write the metadata for each index to a blob in that index's directory at
+ *     {@code /indices/${index-snapshot-uuid}/meta-${snapshot-uuid}.dat}</li>
+ *     <li>Write the {@link org.elasticsearch.snapshots.SnapshotInfo} blob for the given snapshot to the key
+ *     {@code /snap-${snapshot-uuid}.dat} directly under the repository root.</li>
+ *     <li>Write an updated {@code RepositoryData} blob to the key {@code /index-${N+1}} using the {@code N} determined when initializing
+ *     the snapshot in the first step. When doing this, the implementation checks that the blob for generation {@code N + 1} has not yet
+ *     been written to prevent concurrent updates to the repository. If the blob for {@code N + 1} already exists, the execution of
+ *     finalization stops under the assumption that a master failover occurred and the snapshot has already been finalized by the new
+ *     master (a sketch of this check is shown after this list).</li>
+ *     <li>Write the updated {@code /index.latest} blob containing the new repository generation {@code N + 1}.</li>
+ * </ol>
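A sketch of the concurrency guard in step 4, again written against the hypothetical {@code MinimalBlobContainer}; it assumes the container signals a conflicting write with {@code FileAlreadyExistsException}, as the filesystem-based implementation does, and leaves serialization of the {@code RepositoryData} to the caller:

```java
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.FileAlreadyExistsException;

final class RepositoryDataWriter {
    /**
     * Writes index-(N+1) and index.latest for the new generation.
     *
     * @return true if this node wrote the new generation, false if index-(N+1) already existed,
     *         in which case finalization stops because a newer master is assumed to have taken over.
     */
    static boolean writeNextGeneration(MinimalBlobContainer repositoryRoot, long currentGeneration,
                                       byte[] serializedRepositoryData) throws IOException {
        final String nextIndexBlob = "index-" + (currentGeneration + 1);
        try {
            repositoryRoot.writeBlob(nextIndexBlob, new ByteArrayInputStream(serializedRepositoryData),
                serializedRepositoryData.length, true /* failIfAlreadyExists */);
        } catch (FileAlreadyExistsException e) {
            return false;
        }
        // Point index.latest at the new generation (simplified: the real blob holds the value as a long).
        final byte[] generationBytes = Long.toString(currentGeneration + 1).getBytes(StandardCharsets.UTF_8);
        repositoryRoot.writeBlob("index.latest", new ByteArrayInputStream(generationBytes), generationBytes.length, false);
        return true;
    }
}
```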
+ * + *

Deleting a Snapshot

+ * + *

Deleting a snapshot is an operation that is exclusively executed on the master node and that runs through the following sequence of + * actions when {@link org.elasticsearch.repositories.blobstore.BlobStoreRepository#deleteSnapshot} is invoked:

+ * + *
    + *
+ * <ol>
+ *     <li>Get the current {@code RepositoryData} from the latest {@code index-N} blob at the repository root.</li>
+ *     <li>Write an updated {@code RepositoryData} blob with the deleted snapshot removed to key {@code /index-${N+1}} directly under the
+ *     repository root.</li>
+ *     <li>Write an updated {@code index.latest} blob containing {@code N + 1}.</li>
+ *     <li>Delete the global {@code MetaData} blob {@code meta-${snapshot-uuid}.dat} stored directly under the repository root for the
+ *     snapshot as well as the {@code SnapshotInfo} blob at {@code /snap-${snapshot-uuid}.dat}.</li>
+ *     <li>For each index referenced by the snapshot:
+ *     <ol>
+ *         <li>Delete the snapshot's {@code IndexMetaData} at {@code /indices/${index-snapshot-uuid}/meta-${snapshot-uuid}}.</li>
+ *         <li>Go through all shard directories {@code /indices/${index-snapshot-uuid}/${i}} and:
+ *         <ol>
+ *             <li>Remove the {@code BlobStoreIndexShardSnapshot} blob at
+ *             {@code /indices/${index-snapshot-uuid}/${i}/snap-${snapshot-uuid}.dat}.</li>
+ *             <li>List all blobs in the shard path {@code /indices/${index-snapshot-uuid}} and build a new
+ *             {@code BlobStoreIndexShardSnapshots} from the remaining {@code BlobStoreIndexShardSnapshot} blobs in the shard. Afterwards,
+ *             write it to the next shard generation blob at {@code /indices/${index-snapshot-uuid}/${i}/index-${N+1}} (the shard's
+ *             generation is determined from the list of {@code index-N} blobs in the shard directory).</li>
+ *             <li>Delete all segment blobs (identified by having the data blob prefix {@code __}) in the shard directory which are not
+ *             referenced by the new {@code BlobStoreIndexShardSnapshots} that has been written in the previous step (a sketch of this
+ *             selection of unused blobs is shown after this list).</li>
+ *         </ol>
+ *         </li>
+ *     </ol>
+ *     </li>
+ * </ol>
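The last shard-level step above mirrors the {@code unusedBlobs(...)} helper introduced in the diff earlier in this change; the standalone restatement below uses a plain set of referenced data blob names and a predicate for temporary blobs in place of {@code BlobStoreIndexShardSnapshots#findNameFile} and {@code FsBlobContainer#isTempBlobName}:

```java
import java.util.List;
import java.util.Set;
import java.util.function.Predicate;
import java.util.stream.Collectors;

final class ShardCleanup {
    /**
     * Given the blobs listed in the shard directory before the new index-(N+1) blob was written,
     * returns everything that the new BlobStoreIndexShardSnapshots no longer references.
     */
    static List<String> unusedBlobs(Set<String> blobsInShardDirectory,
                                    Set<String> survivingSnapshotUuids,
                                    Set<String> referencedDataBlobNames,
                                    Predicate<String> isTempBlob) {
        return blobsInShardDirectory.stream().filter(blob ->
                blob.startsWith("index-")                                                      // superseded index-N generations
                || (blob.startsWith("snap-") && blob.endsWith(".dat")
                    && survivingSnapshotUuids.contains(
                        blob.substring("snap-".length(), blob.length() - ".dat".length())) == false) // deleted snapshots
                || (blob.startsWith("__") && referencedDataBlobNames.contains(blob) == false)  // unreferenced segment data
                || isTempBlob.test(blob))                                                      // leftover temporary blobs
            .collect(Collectors.toList());
    }
}
```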
+ * TODO: The above sequence of actions can lead to leaking files when an index completely goes out of scope. Adjust this documentation once + * https://github.com/elastic/elasticsearch/issues/13159 is fixed. + */ +package org.elasticsearch.repositories.blobstore; diff --git a/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java b/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java index 8c43ea1ab4e..c6a572efcb7 100644 --- a/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java +++ b/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java @@ -45,6 +45,7 @@ import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.ObjectMapper; import org.elasticsearch.index.mapper.TypeFieldMapper; import org.elasticsearch.index.query.AbstractQueryBuilder; +import org.elasticsearch.index.query.InnerHitContextBuilder; import org.elasticsearch.index.query.ParsedQuery; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryShardContext; @@ -112,6 +113,7 @@ final class DefaultSearchContext extends SearchContext { private ScriptFieldsContext scriptFields; private FetchSourceContext fetchSourceContext; private DocValueFieldsContext docValueFieldsContext; + private Map innerHits = Collections.emptyMap(); private int from = -1; private int size = -1; private SortAndFormats sort; @@ -180,8 +182,8 @@ final class DefaultSearchContext extends SearchContext { this.relativeTimeSupplier = relativeTimeSupplier; this.timeout = timeout; this.minNodeVersion = minNodeVersion; - queryShardContext = indexService.newQueryShardContext(request.shardId().id(), searcher.getIndexReader(), request::nowInMillis, - shardTarget.getClusterAlias()); + queryShardContext = indexService.newQueryShardContext(request.shardId().id(), searcher, + request::nowInMillis, shardTarget.getClusterAlias()); queryShardContext.setTypes(request.types()); queryBoost = request.indexBoost(); } @@ -396,6 +398,16 @@ final class DefaultSearchContext extends SearchContext { this.highlight = highlight; } + @Override + public void innerHits(Map innerHits) { + this.innerHits = innerHits; + } + + @Override + public Map innerHits() { + return innerHits; + } + @Override public SuggestionSearchContext suggest() { return suggest; diff --git a/server/src/main/java/org/elasticsearch/search/SearchService.java b/server/src/main/java/org/elasticsearch/search/SearchService.java index 51955272b78..407228c0526 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchService.java +++ b/server/src/main/java/org/elasticsearch/search/SearchService.java @@ -739,6 +739,7 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv context.from(source.from()); context.size(source.size()); Map innerHitBuilders = new HashMap<>(); + context.innerHits(innerHitBuilders); if (source.query() != null) { InnerHitContextBuilder.extractInnerHits(source.query(), innerHitBuilders); context.parsedQuery(queryShardContext.toQuery(source.query())); @@ -749,11 +750,7 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv } if (innerHitBuilders.size() > 0) { for (Map.Entry entry : innerHitBuilders.entrySet()) { - try { - entry.getValue().build(context, context.innerHits()); - } catch (IOException e) { - throw new SearchContextException(context, "failed to build inner_hits", e); - } + entry.getValue().validate(queryShardContext); } } if (source.sorts() != null) { @@ -787,7 +784,7 @@ public class 
SearchService extends AbstractLifecycleComponent implements IndexEv context.terminateAfter(source.terminateAfter()); if (source.aggregations() != null) { try { - AggregatorFactories factories = source.aggregations().build(context, null); + AggregatorFactories factories = source.aggregations().build(queryShardContext, null); context.aggregations(new SearchContextAggregations(factories, multiBucketConsumerService.create())); } catch (IOException e) { throw new AggregationInitializationException("Failed to create aggregators", e); @@ -901,7 +898,16 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv } if (source.collapse() != null) { - final CollapseContext collapseContext = source.collapse().build(context); + if (context.scrollContext() != null) { + throw new SearchContextException(context, "cannot use `collapse` in a scroll context"); + } + if (context.searchAfter() != null) { + throw new SearchContextException(context, "cannot use `collapse` in conjunction with `search_after`"); + } + if (context.rescore() != null && context.rescore().isEmpty() == false) { + throw new SearchContextException(context, "cannot use `collapse` in conjunction with `rescore`"); + } + final CollapseContext collapseContext = source.collapse().build(queryShardContext); context.collapse(collapseContext); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/AbstractAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/AbstractAggregationBuilder.java index 117f3a0772a..ac842a70519 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/AbstractAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/AbstractAggregationBuilder.java @@ -21,7 +21,7 @@ package org.elasticsearch.search.aggregations; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.search.internal.SearchContext; +import org.elasticsearch.index.query.QueryShardContext; import java.io.IOException; import java.util.Collections; @@ -135,13 +135,13 @@ public abstract class AbstractAggregationBuilder aggregators; ObjectArray collectors; - MultiBucketAggregatorWrapper(BigArrays bigArrays, SearchContext context, Aggregator parent, AggregatorFactory factory, - Aggregator first) { + MultiBucketAggregatorWrapper(BigArrays bigArrays, SearchContext context, + Aggregator parent, AggregatorFactory factory, Aggregator first) { this.bigArrays = bigArrays; this.parent = parent; this.factory = factory; @@ -126,7 +127,7 @@ public abstract class AggregatorFactory { aggregators = bigArrays.grow(aggregators, bucket + 1); Aggregator aggregator = aggregators.get(bucket); if (aggregator == null) { - aggregator = factory.create(parent, true); + aggregator = factory.create(context(), parent, true); aggregator.preCollection(); aggregators.set(bucket, aggregator); } @@ -170,7 +171,8 @@ public abstract class AggregatorFactory { protected final AggregatorFactory parent; protected final AggregatorFactories factories; protected final Map metaData; - protected final SearchContext context; + + protected final QueryShardContext queryShardContext; /** * Constructs a new aggregator factory. 
@@ -180,12 +182,12 @@ public abstract class AggregatorFactory { * @throws IOException * if an error occurs creating the factory */ - public AggregatorFactory(String name, SearchContext context, AggregatorFactory parent, - AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { + public AggregatorFactory(String name, QueryShardContext queryShardContext, AggregatorFactory parent, + AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { this.name = name; - this.context = context; + this.queryShardContext = queryShardContext; this.parent = parent; - this.factories = subFactoriesBuilder.build(context, this); + this.factories = subFactoriesBuilder.build(queryShardContext, this); this.metaData = metaData; } @@ -196,12 +198,18 @@ public abstract class AggregatorFactory { public void doValidate() { } - protected abstract Aggregator createInternal(Aggregator parent, boolean collectsFromSingleBucket, - List pipelineAggregators, Map metaData) throws IOException; + protected abstract Aggregator createInternal(SearchContext searchContext, + Aggregator parent, + boolean collectsFromSingleBucket, + List pipelineAggregators, + Map metaData) throws IOException; /** * Creates the aggregator * + * + * @param searchContext + * The search context * @param parent * The parent aggregator (if this is a top level factory, the * parent will be {@code null}) @@ -213,8 +221,8 @@ public abstract class AggregatorFactory { * * @return The created aggregator */ - public final Aggregator create(Aggregator parent, boolean collectsFromSingleBucket) throws IOException { - return createInternal(parent, collectsFromSingleBucket, this.factories.createPipelineAggregators(), this.metaData); + public final Aggregator create(SearchContext searchContext, Aggregator parent, boolean collectsFromSingleBucket) throws IOException { + return createInternal(searchContext, parent, collectsFromSingleBucket, this.factories.createPipelineAggregators(), this.metaData); } public AggregatorFactory getParent() { @@ -226,11 +234,11 @@ public abstract class AggregatorFactory { * {@link Aggregator}s that only know how to collect bucket {@code 0}, this * returns an aggregator that can collect any bucket. 
*/ - protected static Aggregator asMultiBucketAggregator(final AggregatorFactory factory, final SearchContext context, + protected static Aggregator asMultiBucketAggregator(final AggregatorFactory factory, final SearchContext searchContext, final Aggregator parent) throws IOException { - final Aggregator first = factory.create(parent, true); - final BigArrays bigArrays = context.bigArrays(); - return new MultiBucketAggregatorWrapper(bigArrays, context, parent, factory, first); + final Aggregator first = factory.create(searchContext, parent, true); + final BigArrays bigArrays = searchContext.bigArrays(); + return new MultiBucketAggregatorWrapper(bigArrays, searchContext, parent, factory, first); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregationBuilder.java index 6b0530eb62e..7f861b13d94 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregationBuilder.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.query.Rewriteable; import org.elasticsearch.search.aggregations.AbstractAggregationBuilder; import org.elasticsearch.search.aggregations.AggregationBuilder; @@ -34,7 +35,6 @@ import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; import org.elasticsearch.search.aggregations.bucket.MultiBucketAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.adjacency.AdjacencyMatrixAggregator.KeyedFilter; -import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; import java.util.ArrayList; @@ -196,9 +196,9 @@ public class AdjacencyMatrixAggregationBuilder extends AbstractAggregationBuilde @Override - protected AggregatorFactory doBuild(SearchContext context, AggregatorFactory parent, Builder subFactoriesBuilder) + protected AggregatorFactory doBuild(QueryShardContext queryShardContext, AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException { - int maxFilters = context.indexShard().indexSettings().getMaxAdjacencyMatrixFilters(); + int maxFilters = queryShardContext.getIndexSettings().getMaxAdjacencyMatrixFilters(); if (filters.size() > maxFilters){ throw new IllegalArgumentException( "Number of filters is too large, must be less than or equal to: [" + maxFilters + "] but was [" @@ -209,10 +209,10 @@ public class AdjacencyMatrixAggregationBuilder extends AbstractAggregationBuilde List rewrittenFilters = new ArrayList<>(filters.size()); for (KeyedFilter kf : filters) { - rewrittenFilters.add(new KeyedFilter(kf.key(), Rewriteable.rewrite(kf.filter(), context.getQueryShardContext(), true))); + rewrittenFilters.add(new KeyedFilter(kf.key(), Rewriteable.rewrite(kf.filter(), queryShardContext, true))); } - return new AdjacencyMatrixAggregatorFactory(name, rewrittenFilters, separator, context, parent, + return new AdjacencyMatrixAggregatorFactory(name, rewrittenFilters, separator, queryShardContext, parent, subFactoriesBuilder, metaData); } diff 
--git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregatorFactory.java index 541a8e482c6..83cd593ea6a 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregatorFactory.java @@ -23,6 +23,7 @@ import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Weight; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; @@ -40,26 +41,29 @@ public class AdjacencyMatrixAggregatorFactory extends AggregatorFactory { private final Weight[] weights; private final String separator; - public AdjacencyMatrixAggregatorFactory(String name, List filters, String separator, - SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactories, - Map metaData) throws IOException { - super(name, context, parent, subFactories, metaData); - IndexSearcher contextSearcher = context.searcher(); + public AdjacencyMatrixAggregatorFactory(String name, List filters, String separator, + QueryShardContext queryShardContext, AggregatorFactory parent, + AggregatorFactories.Builder subFactories, Map metaData) throws IOException { + super(name, queryShardContext, parent, subFactories, metaData); + IndexSearcher contextSearcher = queryShardContext.searcher(); this.separator = separator; weights = new Weight[filters.size()]; keys = new String[filters.size()]; for (int i = 0; i < filters.size(); ++i) { KeyedFilter keyedFilter = filters.get(i); this.keys[i] = keyedFilter.key(); - Query filter = keyedFilter.filter().toQuery(context.getQueryShardContext()); + Query filter = keyedFilter.filter().toQuery(queryShardContext); this.weights[i] = contextSearcher.createWeight(contextSearcher.rewrite(filter), ScoreMode.COMPLETE_NO_SCORES, 1f); } } @Override - public Aggregator createInternal(Aggregator parent, boolean collectsFromSingleBucket, List pipelineAggregators, - Map metaData) throws IOException { - return new AdjacencyMatrixAggregator(name, factories, separator, keys, weights, context, parent, + public Aggregator createInternal(SearchContext searchContext, + Aggregator parent, + boolean collectsFromSingleBucket, + List pipelineAggregators, + Map metaData) throws IOException { + return new AdjacencyMatrixAggregator(name, factories, separator, keys, weights, searchContext, parent, pipelineAggregators, metaData); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregationBuilder.java index c91e40d4609..b3712e231fd 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregationBuilder.java @@ -25,12 +25,12 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import 
org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.aggregations.AbstractAggregationBuilder; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; import org.elasticsearch.search.aggregations.bucket.nested.NestedAggregatorFactory; -import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; import java.util.ArrayList; @@ -195,8 +195,8 @@ public class CompositeAggregationBuilder extends AbstractAggregationBuilder metaData, int size, CompositeValuesSourceConfig[] sources, CompositeKey afterKey) throws IOException { - super(name, context, parent, subFactoriesBuilder, metaData); + super(name, queryShardContext, parent, subFactoriesBuilder, metaData); this.size = size; this.sources = sources; this.afterKey = afterKey; } @Override - protected Aggregator createInternal(Aggregator parent, boolean collectsFromSingleBucket, + protected Aggregator createInternal(SearchContext searchContext, Aggregator parent, boolean collectsFromSingleBucket, List pipelineAggregators, Map metaData) throws IOException { - return new CompositeAggregator(name, factories, context, parent, pipelineAggregators, metaData, + return new CompositeAggregator(name, factories, searchContext, parent, pipelineAggregators, metaData, size, sources, afterKey); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesSourceBuilder.java index 6ca4c8ca249..b9c92907866 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesSourceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesSourceBuilder.java @@ -25,11 +25,11 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.script.Script; import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import org.elasticsearch.search.aggregations.support.ValuesSource; -import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.sort.SortOrder; import java.io.IOException; @@ -289,15 +289,15 @@ public abstract class CompositeValuesSourceBuilder config) throws IOException; + protected abstract CompositeValuesSourceConfig innerBuild(QueryShardContext queryShardContext, + ValuesSourceConfig config) throws IOException; - public final CompositeValuesSourceConfig build(SearchContext context) throws IOException { - ValuesSourceConfig config = ValuesSourceConfig.resolve(context.getQueryShardContext(), + public final CompositeValuesSourceConfig build(QueryShardContext queryShardContext) throws IOException { + ValuesSourceConfig config = ValuesSourceConfig.resolve(queryShardContext, valueType, field, script, null,null, format); - return innerBuild(context, config); + return innerBuild(queryShardContext, config); } } diff --git 
a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java index 71cc5fcd4b2..f1c1f5502df 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.script.Script; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; @@ -36,7 +37,6 @@ import org.elasticsearch.search.aggregations.bucket.histogram.DateIntervalWrappe import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; -import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; import java.time.ZoneId; @@ -216,9 +216,9 @@ public class DateHistogramValuesSourceBuilder } @Override - protected CompositeValuesSourceConfig innerBuild(SearchContext context, ValuesSourceConfig config) throws IOException { + protected CompositeValuesSourceConfig innerBuild(QueryShardContext queryShardContext, ValuesSourceConfig config) throws IOException { Rounding rounding = dateHistogramInterval.createRounding(timeZone()); - ValuesSource orig = config.toValuesSource(context.getQueryShardContext()); + ValuesSource orig = config.toValuesSource(queryShardContext); if (orig == null) { orig = ValuesSource.Numeric.EMPTY; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/GeoTileGridValuesSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/GeoTileGridValuesSourceBuilder.java index 36debb1e40f..17a5b3c0e99 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/GeoTileGridValuesSourceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/GeoTileGridValuesSourceBuilder.java @@ -26,6 +26,7 @@ import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.bucket.geogrid.CellIdSource; import org.elasticsearch.search.aggregations.bucket.geogrid.GeoTileGridAggregationBuilder; @@ -33,7 +34,6 @@ import org.elasticsearch.search.aggregations.bucket.geogrid.GeoTileUtils; import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; -import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; import java.util.Objects; @@ -103,8 +103,8 @@ public class GeoTileGridValuesSourceBuilder extends CompositeValuesSourceBuilder } @Override - 
protected CompositeValuesSourceConfig innerBuild(SearchContext context, ValuesSourceConfig config) throws IOException { - ValuesSource orig = config.toValuesSource(context.getQueryShardContext()); + protected CompositeValuesSourceConfig innerBuild(QueryShardContext queryShardContext, ValuesSourceConfig config) throws IOException { + ValuesSource orig = config.toValuesSource(queryShardContext); if (orig == null) { orig = ValuesSource.GeoPoint.EMPTY; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/HistogramValuesSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/HistogramValuesSourceBuilder.java index 6ec1c76d459..daafa6f1441 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/HistogramValuesSourceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/HistogramValuesSourceBuilder.java @@ -25,11 +25,11 @@ import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; -import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; import java.util.Objects; @@ -110,8 +110,8 @@ public class HistogramValuesSourceBuilder extends CompositeValuesSourceBuilder config) throws IOException { - ValuesSource orig = config.toValuesSource(context.getQueryShardContext()); + protected CompositeValuesSourceConfig innerBuild(QueryShardContext queryShardContext, ValuesSourceConfig config) throws IOException { + ValuesSource orig = config.toValuesSource(queryShardContext); if (orig == null) { orig = ValuesSource.Numeric.EMPTY; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/TermsValuesSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/TermsValuesSourceBuilder.java index 061b8c5f5de..8d02eb4b19d 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/TermsValuesSourceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/TermsValuesSourceBuilder.java @@ -26,10 +26,10 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; -import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.script.Script; import java.io.IOException; @@ -70,8 +70,8 @@ public class TermsValuesSourceBuilder extends CompositeValuesSourceBuilder config) throws IOException { - ValuesSource vs = config.toValuesSource(context.getQueryShardContext()); + protected CompositeValuesSourceConfig innerBuild(QueryShardContext queryShardContext, ValuesSourceConfig config) throws IOException { + 
ValuesSource vs = config.toValuesSource(queryShardContext); if (vs == null) { // The field is unmapped so we use a value source that can parse any type of values. // This is needed because the after values are parsed even when there are no values to process. diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregationBuilder.java index e53e8ed3fc2..151b86f51cc 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregationBuilder.java @@ -25,12 +25,12 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryRewriteContext; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.query.Rewriteable; import org.elasticsearch.search.aggregations.AbstractAggregationBuilder; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; -import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; import java.util.Map; @@ -93,9 +93,9 @@ public class FilterAggregationBuilder extends AbstractAggregationBuilder metaData) throws IOException { - super(name, context, parent, subFactoriesBuilder, metaData); - filter = filterBuilder.toQuery(context.getQueryShardContext()); + super(name, queryShardContext, parent, subFactoriesBuilder, metaData); + filter = filterBuilder.toQuery(queryShardContext); } /** @@ -57,7 +58,7 @@ public class FilterAggregatorFactory extends AggregatorFactory { */ public Weight getWeight() { if (weight == null) { - IndexSearcher contextSearcher = context.searcher(); + IndexSearcher contextSearcher = queryShardContext.searcher(); try { weight = contextSearcher.createWeight(contextSearcher.rewrite(filter), ScoreMode.COMPLETE_NO_SCORES, 1f); } catch (IOException e) { @@ -68,9 +69,12 @@ public class FilterAggregatorFactory extends AggregatorFactory { } @Override - public Aggregator createInternal(Aggregator parent, boolean collectsFromSingleBucket, List pipelineAggregators, - Map metaData) throws IOException { - return new FilterAggregator(name, () -> this.getWeight(), factories, context, parent, pipelineAggregators, metaData); + public Aggregator createInternal(SearchContext searchContext, + Aggregator parent, + boolean collectsFromSingleBucket, + List pipelineAggregators, + Map metaData) throws IOException { + return new FilterAggregator(name, () -> this.getWeight(), factories, searchContext, parent, pipelineAggregators, metaData); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregationBuilder.java index add11f16483..0125440fc41 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregationBuilder.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import 
org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryRewriteContext; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.query.Rewriteable; import org.elasticsearch.search.aggregations.AbstractAggregationBuilder; import org.elasticsearch.search.aggregations.AggregationBuilder; @@ -34,7 +35,6 @@ import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; import org.elasticsearch.search.aggregations.bucket.MultiBucketAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.filter.FiltersAggregator.KeyedFilter; -import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; import java.util.ArrayList; @@ -220,9 +220,9 @@ public class FiltersAggregationBuilder extends AbstractAggregationBuilder filters, boolean keyed, boolean otherBucket, - String otherBucketKey, SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactories, - Map metaData) throws IOException { - super(name, context, parent, subFactories, metaData); + String otherBucketKey, QueryShardContext queryShardContext, AggregatorFactory parent, + AggregatorFactories.Builder subFactories, Map metaData) throws IOException { + super(name, queryShardContext, parent, subFactories, metaData); this.keyed = keyed; this.otherBucket = otherBucket; this.otherBucketKey = otherBucketKey; @@ -56,7 +57,7 @@ public class FiltersAggregatorFactory extends AggregatorFactory { for (int i = 0; i < filters.size(); ++i) { KeyedFilter keyedFilter = filters.get(i); this.keys[i] = keyedFilter.key(); - this.filters[i] = keyedFilter.filter().toQuery(context.getQueryShardContext()); + this.filters[i] = keyedFilter.filter().toQuery(queryShardContext); } } @@ -69,10 +70,10 @@ public class FiltersAggregatorFactory extends AggregatorFactory { * Note that as aggregations are initialsed and executed in a serial manner, * no concurrency considerations are necessary here. */ - public Weight[] getWeights() { + public Weight[] getWeights(SearchContext searchContext) { if (weights == null) { try { - IndexSearcher contextSearcher = context.searcher(); + IndexSearcher contextSearcher = searchContext.searcher(); weights = new Weight[filters.length]; for (int i = 0; i < filters.length; ++i) { this.weights[i] = contextSearcher.createWeight(contextSearcher.rewrite(filters[i]), ScoreMode.COMPLETE_NO_SCORES, 1); @@ -85,10 +86,13 @@ public class FiltersAggregatorFactory extends AggregatorFactory { } @Override - public Aggregator createInternal(Aggregator parent, boolean collectsFromSingleBucket, List pipelineAggregators, - Map metaData) throws IOException { - return new FiltersAggregator(name, factories, keys, () -> getWeights(), keyed, otherBucket ? otherBucketKey : null, context, parent, - pipelineAggregators, metaData); + public Aggregator createInternal(SearchContext searchContext, + Aggregator parent, + boolean collectsFromSingleBucket, + List pipelineAggregators, + Map metaData) throws IOException { + return new FiltersAggregator(name, factories, keys, () -> getWeights(searchContext), keyed, + otherBucket ? 
otherBucketKey : null, searchContext, parent, pipelineAggregators, metaData); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java index bae95c84c00..32ff1d07c53 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java @@ -26,6 +26,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; import org.elasticsearch.search.aggregations.bucket.BucketUtils; @@ -37,7 +38,6 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFacto import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import org.elasticsearch.search.aggregations.support.ValuesSourceParserHelper; import org.elasticsearch.search.aggregations.support.ValuesSourceType; -import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; import java.util.Map; @@ -110,7 +110,7 @@ public abstract class GeoGridAggregationBuilder extends ValuesSourceAggregationB */ protected abstract ValuesSourceAggregatorFactory createFactory( String name, ValuesSourceConfig config, int precision, int requiredSize, int shardSize, - SearchContext context, AggregatorFactory parent, Builder subFactoriesBuilder, Map metaData + QueryShardContext queryShardContext, AggregatorFactory parent, Builder subFactoriesBuilder, Map metaData ) throws IOException; public int precision() { @@ -144,8 +144,9 @@ public abstract class GeoGridAggregationBuilder extends ValuesSourceAggregationB } @Override - protected ValuesSourceAggregatorFactory innerBuild(SearchContext context, - ValuesSourceConfig config, AggregatorFactory parent, Builder subFactoriesBuilder) + protected ValuesSourceAggregatorFactory innerBuild(QueryShardContext queryShardContext, + ValuesSourceConfig config, + AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException { int shardSize = this.shardSize; @@ -165,7 +166,7 @@ public abstract class GeoGridAggregationBuilder extends ValuesSourceAggregationB if (shardSize < requiredSize) { shardSize = requiredSize; } - return createFactory(name, config, precision, requiredSize, shardSize, context, parent, + return createFactory(name, config, precision, requiredSize, shardSize, queryShardContext, parent, subFactoriesBuilder, metaData); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregationBuilder.java index cd7e8d41c64..d58beeb781c 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregationBuilder.java @@ -23,13 +23,13 @@ import org.elasticsearch.common.geo.GeoUtils; import org.elasticsearch.common.io.stream.StreamInput; import 
org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; -import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; import java.util.Map; @@ -61,10 +61,9 @@ public class GeoHashGridAggregationBuilder extends GeoGridAggregationBuilder { @Override protected ValuesSourceAggregatorFactory createFactory( String name, ValuesSourceConfig config, int precision, int requiredSize, int shardSize, - SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, - Map metaData - ) throws IOException { - return new GeoHashGridAggregatorFactory(name, config, precision, requiredSize, shardSize, context, parent, + QueryShardContext queryShardContext, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, + Map metaData) throws IOException { + return new GeoHashGridAggregatorFactory(name, config, precision, requiredSize, shardSize, queryShardContext, parent, subFactoriesBuilder, metaData); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregatorFactory.java index c31340e3dea..a049a07f13d 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregatorFactory.java @@ -20,6 +20,7 @@ package org.elasticsearch.search.aggregations.bucket.geogrid; import org.elasticsearch.geometry.utils.Geohash; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; @@ -44,20 +45,22 @@ public class GeoHashGridAggregatorFactory extends ValuesSourceAggregatorFactory< private final int shardSize; GeoHashGridAggregatorFactory(String name, ValuesSourceConfig config, int precision, int requiredSize, - int shardSize, SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, - Map metaData) throws IOException { - super(name, config, context, parent, subFactoriesBuilder, metaData); + int shardSize, QueryShardContext queryShardContext, AggregatorFactory parent, + AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { + super(name, config, queryShardContext, parent, subFactoriesBuilder, metaData); this.precision = precision; this.requiredSize = requiredSize; this.shardSize = shardSize; } @Override - protected Aggregator createUnmapped(Aggregator parent, List pipelineAggregators, Map metaData) - throws IOException { + protected Aggregator createUnmapped(SearchContext searchContext, + Aggregator parent, + List pipelineAggregators, + Map metaData) throws IOException { final InternalAggregation aggregation = new InternalGeoHashGrid(name, requiredSize, Collections.emptyList(), 
pipelineAggregators, metaData); - return new NonCollectingAggregator(name, context, parent, pipelineAggregators, metaData) { + return new NonCollectingAggregator(name, searchContext, parent, pipelineAggregators, metaData) { @Override public InternalAggregation buildEmptyAggregation() { return aggregation; @@ -66,13 +69,17 @@ public class GeoHashGridAggregatorFactory extends ValuesSourceAggregatorFactory< } @Override - protected Aggregator doCreateInternal(final ValuesSource.GeoPoint valuesSource, Aggregator parent, boolean collectsFromSingleBucket, - List pipelineAggregators, Map metaData) throws IOException { + protected Aggregator doCreateInternal(final GeoPoint valuesSource, + SearchContext searchContext, + Aggregator parent, + boolean collectsFromSingleBucket, + List pipelineAggregators, + Map metaData) throws IOException { if (collectsFromSingleBucket == false) { - return asMultiBucketAggregator(this, context, parent); + return asMultiBucketAggregator(this, searchContext, parent); } CellIdSource cellIdSource = new CellIdSource(valuesSource, precision, Geohash::longEncode); - return new GeoHashGridAggregator(name, factories, cellIdSource, requiredSize, shardSize, context, parent, + return new GeoHashGridAggregator(name, factories, cellIdSource, requiredSize, shardSize, searchContext, parent, pipelineAggregators, metaData); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridAggregationBuilder.java index 966bb81a9f8..b3d98887813 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridAggregationBuilder.java @@ -22,13 +22,13 @@ package org.elasticsearch.search.aggregations.bucket.geogrid; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; -import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; import java.util.Map; @@ -60,10 +60,10 @@ public class GeoTileGridAggregationBuilder extends GeoGridAggregationBuilder { @Override protected ValuesSourceAggregatorFactory createFactory( String name, ValuesSourceConfig config, int precision, int requiredSize, int shardSize, - SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, + QueryShardContext queryShardContext, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData ) throws IOException { - return new GeoTileGridAggregatorFactory(name, config, precision, requiredSize, shardSize, context, parent, + return new GeoTileGridAggregatorFactory(name, config, precision, requiredSize, shardSize, queryShardContext, parent, subFactoriesBuilder, metaData); } diff --git 
a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridAggregatorFactory.java index 34a1bdc35ca..8380a4172c9 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridAggregatorFactory.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.aggregations.bucket.geogrid; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; @@ -43,21 +44,22 @@ public class GeoTileGridAggregatorFactory extends ValuesSourceAggregatorFactory< private final int shardSize; GeoTileGridAggregatorFactory(String name, ValuesSourceConfig config, int precision, int requiredSize, - int shardSize, SearchContext context, AggregatorFactory parent, - AggregatorFactories.Builder subFactoriesBuilder, Map metaData - ) throws IOException { - super(name, config, context, parent, subFactoriesBuilder, metaData); + int shardSize, QueryShardContext queryShardContext, AggregatorFactory parent, + AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { + super(name, config, queryShardContext, parent, subFactoriesBuilder, metaData); this.precision = precision; this.requiredSize = requiredSize; this.shardSize = shardSize; } @Override - protected Aggregator createUnmapped(Aggregator parent, List pipelineAggregators, Map metaData) - throws IOException { + protected Aggregator createUnmapped(SearchContext searchContext, + Aggregator parent, + List pipelineAggregators, + Map metaData) throws IOException { final InternalAggregation aggregation = new InternalGeoTileGrid(name, requiredSize, Collections.emptyList(), pipelineAggregators, metaData); - return new NonCollectingAggregator(name, context, parent, pipelineAggregators, metaData) { + return new NonCollectingAggregator(name, searchContext, parent, pipelineAggregators, metaData) { @Override public InternalAggregation buildEmptyAggregation() { return aggregation; @@ -66,13 +68,17 @@ public class GeoTileGridAggregatorFactory extends ValuesSourceAggregatorFactory< } @Override - protected Aggregator doCreateInternal(final ValuesSource.GeoPoint valuesSource, Aggregator parent, boolean collectsFromSingleBucket, - List pipelineAggregators, Map metaData) throws IOException { + protected Aggregator doCreateInternal(final GeoPoint valuesSource, + SearchContext searchContext, + Aggregator parent, + boolean collectsFromSingleBucket, + List pipelineAggregators, + Map metaData) throws IOException { if (collectsFromSingleBucket == false) { - return asMultiBucketAggregator(this, context, parent); + return asMultiBucketAggregator(this, searchContext, parent); } CellIdSource cellIdSource = new CellIdSource(valuesSource, precision, GeoTileUtils::longEncode); - return new GeoTileGridAggregator(name, factories, cellIdSource, requiredSize, shardSize, context, parent, + return new GeoTileGridAggregator(name, factories, cellIdSource, requiredSize, shardSize, searchContext, parent, pipelineAggregators, metaData); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/global/GlobalAggregationBuilder.java 
b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/global/GlobalAggregationBuilder.java index a6e85a03157..a4611cc60d4 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/global/GlobalAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/global/GlobalAggregationBuilder.java @@ -23,11 +23,11 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.aggregations.AbstractAggregationBuilder; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; -import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; import java.util.Map; @@ -61,9 +61,9 @@ public class GlobalAggregationBuilder extends AbstractAggregationBuilder metaData) throws IOException { - super(name, context, parent, subFactories, metaData); + public GlobalAggregatorFactory(String name, + QueryShardContext queryShardContext, + AggregatorFactory parent, + AggregatorFactories.Builder subFactories, + Map metaData) throws IOException { + super(name, queryShardContext, parent, subFactories, metaData); } @Override - public Aggregator createInternal(Aggregator parent, boolean collectsFromSingleBucket, List pipelineAggregators, - Map metaData) throws IOException { + public Aggregator createInternal(SearchContext searchContext, + Aggregator parent, + boolean collectsFromSingleBucket, + List pipelineAggregators, + Map metaData) throws IOException { if (parent != null) { throw new AggregationExecutionException("Aggregation [" + parent.name() + "] cannot have a global " + "sub-aggregation [" + name + "]. 
Global aggregations can only be defined as top level aggregations"); @@ -47,6 +54,6 @@ public class GlobalAggregatorFactory extends AggregatorFactory { if (collectsFromSingleBucket == false) { throw new IllegalStateException(); } - return new GlobalAggregator(name, factories, context, pipelineAggregators, metaData); + return new GlobalAggregator(name, factories, searchContext, pipelineAggregators, metaData); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java index 53e0f5ef32e..f096eee448d 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java @@ -29,6 +29,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; @@ -41,7 +42,6 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFacto import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import org.elasticsearch.search.aggregations.support.ValuesSourceParserHelper; import org.elasticsearch.search.aggregations.support.ValuesSourceType; -import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; import java.time.ZoneId; @@ -184,22 +184,22 @@ public class AutoDateHistogramAggregationBuilder } @Override - protected ValuesSourceAggregatorFactory innerBuild(SearchContext context, ValuesSourceConfig config, - AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException { + protected ValuesSourceAggregatorFactory innerBuild(QueryShardContext queryShardContext, ValuesSourceConfig config, + AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException { RoundingInfo[] roundings = buildRoundings(timeZone(), getMinimumIntervalExpression()); int maxRoundingInterval = Arrays.stream(roundings,0, roundings.length-1) .map(rounding -> rounding.innerIntervals) .flatMapToInt(Arrays::stream) .boxed() .reduce(Integer::max).get(); - Settings settings = context.getQueryShardContext().getIndexSettings().getNodeSettings(); + Settings settings = queryShardContext.getIndexSettings().getNodeSettings(); int maxBuckets = MultiBucketConsumerService.MAX_BUCKET_SETTING.get(settings); int bucketCeiling = maxBuckets / maxRoundingInterval; if (numBuckets > bucketCeiling) { throw new IllegalArgumentException(NUM_BUCKETS_FIELD.getPreferredName()+ " must be less than " + bucketCeiling); } - return new AutoDateHistogramAggregatorFactory(name, config, numBuckets, roundings, context, parent, + return new AutoDateHistogramAggregatorFactory(name, config, numBuckets, roundings, queryShardContext, parent, subFactoriesBuilder, metaData); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregatorFactory.java 
index c2ca3b15059..81af173ab52 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregatorFactory.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.aggregations.bucket.histogram; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; @@ -44,34 +45,42 @@ public final class AutoDateHistogramAggregatorFactory ValuesSourceConfig config, int numBuckets, RoundingInfo[] roundingInfos, - SearchContext context, + QueryShardContext queryShardContext, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { - super(name, config, context, parent, subFactoriesBuilder, metaData); + super(name, config, queryShardContext, parent, subFactoriesBuilder, metaData); this.numBuckets = numBuckets; this.roundingInfos = roundingInfos; } @Override - protected Aggregator doCreateInternal(ValuesSource.Numeric valuesSource, Aggregator parent, boolean collectsFromSingleBucket, - List pipelineAggregators, Map metaData) throws IOException { + protected Aggregator doCreateInternal(Numeric valuesSource, + SearchContext searchContext, + Aggregator parent, + boolean collectsFromSingleBucket, + List pipelineAggregators, + Map metaData) throws IOException { if (collectsFromSingleBucket == false) { - return asMultiBucketAggregator(this, context, parent); + return asMultiBucketAggregator(this, searchContext, parent); } - return createAggregator(valuesSource, parent, pipelineAggregators, metaData); + return createAggregator(valuesSource, searchContext, parent, pipelineAggregators, metaData); } - private Aggregator createAggregator(ValuesSource.Numeric valuesSource, Aggregator parent, List pipelineAggregators, - Map metaData) throws IOException { - return new AutoDateHistogramAggregator(name, factories, numBuckets, roundingInfos, valuesSource, config.format(), context, parent, - pipelineAggregators, - metaData); + private Aggregator createAggregator(ValuesSource.Numeric valuesSource, + SearchContext searchContext, + Aggregator parent, + List pipelineAggregators, + Map metaData) throws IOException { + return new AutoDateHistogramAggregator(name, factories, numBuckets, roundingInfos, + valuesSource, config.format(), searchContext, parent, pipelineAggregators, metaData); } @Override - protected Aggregator createUnmapped(Aggregator parent, List pipelineAggregators, Map metaData) - throws IOException { - return createAggregator(null, parent, pipelineAggregators, metaData); + protected Aggregator createUnmapped(SearchContext searchContext, + Aggregator parent, + List pipelineAggregators, + Map metaData) throws IOException { + return createAggregator(null, searchContext, parent, pipelineAggregators, metaData); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java index 15426599d97..d75b3e0ff70 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java +++ 
b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java @@ -52,7 +52,6 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFacto import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import org.elasticsearch.search.aggregations.support.ValuesSourceParserHelper; import org.elasticsearch.search.aggregations.support.ValuesSourceType; -import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; import java.time.Instant; @@ -491,11 +490,13 @@ public class DateHistogramAggregationBuilder extends ValuesSourceAggregationBuil } @Override - protected ValuesSourceAggregatorFactory innerBuild(SearchContext context, ValuesSourceConfig config, - AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException { + protected ValuesSourceAggregatorFactory innerBuild(QueryShardContext queryShardContext, + ValuesSourceConfig config, + AggregatorFactory parent, + Builder subFactoriesBuilder) throws IOException { final ZoneId tz = timeZone(); final Rounding rounding = dateHistogramInterval.createRounding(tz); - final ZoneId rewrittenTimeZone = rewriteTimeZone(context.getQueryShardContext()); + final ZoneId rewrittenTimeZone = rewriteTimeZone(queryShardContext); final Rounding shardRounding; if (tz == rewrittenTimeZone) { shardRounding = rounding; @@ -506,10 +507,10 @@ public class DateHistogramAggregationBuilder extends ValuesSourceAggregationBuil ExtendedBounds roundedBounds = null; if (this.extendedBounds != null) { // parse any string bounds to longs and round - roundedBounds = this.extendedBounds.parseAndValidate(name, context, config.format()).round(rounding); + roundedBounds = this.extendedBounds.parseAndValidate(name, queryShardContext, config.format()).round(rounding); } return new DateHistogramAggregatorFactory(name, config, offset, order, keyed, minDocCount, - rounding, shardRounding, roundedBounds, context, parent, subFactoriesBuilder, metaData); + rounding, shardRounding, roundedBounds, queryShardContext, parent, subFactoriesBuilder, metaData); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java index 210012c20a7..86555767e25 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java @@ -21,6 +21,7 @@ package org.elasticsearch.search.aggregations.bucket.histogram; import org.elasticsearch.common.Rounding; import org.elasticsearch.index.mapper.RangeType; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; @@ -48,10 +49,10 @@ public final class DateHistogramAggregatorFactory public DateHistogramAggregatorFactory(String name, ValuesSourceConfig config, long offset, BucketOrder order, boolean keyed, long minDocCount, - Rounding rounding, Rounding shardRounding, ExtendedBounds extendedBounds, SearchContext context, + Rounding rounding, Rounding shardRounding, ExtendedBounds extendedBounds, QueryShardContext queryShardContext, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map 
metaData) throws IOException { - super(name, config, context, parent, subFactoriesBuilder, metaData); + super(name, config, queryShardContext, parent, subFactoriesBuilder, metaData); this.offset = offset; this.order = order; this.keyed = keyed; @@ -75,20 +76,24 @@ public final class DateHistogramAggregatorFactory } @Override - protected Aggregator doCreateInternal(ValuesSource valuesSource, Aggregator parent, boolean collectsFromSingleBucket, - List pipelineAggregators, Map metaData) throws IOException { + protected Aggregator doCreateInternal(ValuesSource valuesSource, + SearchContext searchContext, + Aggregator parent, + boolean collectsFromSingleBucket, + List pipelineAggregators, + Map metaData) throws IOException { if (collectsFromSingleBucket == false) { - return asMultiBucketAggregator(this, context, parent); + return asMultiBucketAggregator(this, searchContext, parent); } if (valuesSource instanceof ValuesSource.Numeric) { - return createAggregator((ValuesSource.Numeric) valuesSource, parent, pipelineAggregators, metaData); + return createAggregator((ValuesSource.Numeric) valuesSource, searchContext, parent, pipelineAggregators, metaData); } else if (valuesSource instanceof ValuesSource.Range) { ValuesSource.Range rangeValueSource = (ValuesSource.Range) valuesSource; if (rangeValueSource.rangeType() != RangeType.DATE) { throw new IllegalArgumentException("Expected date range type but found range type [" + rangeValueSource.rangeType().name + "]"); } - return createRangeAggregator((ValuesSource.Range) valuesSource, parent, pipelineAggregators, metaData); + return createRangeAggregator((ValuesSource.Range) valuesSource, searchContext, parent, pipelineAggregators, metaData); } else { throw new IllegalArgumentException("Expected one of [Date, Range] values source, found [" @@ -96,22 +101,27 @@ public final class DateHistogramAggregatorFactory } } - private Aggregator createAggregator(ValuesSource.Numeric valuesSource, Aggregator parent, List pipelineAggregators, + private Aggregator createAggregator(ValuesSource.Numeric valuesSource, SearchContext searchContext, + Aggregator parent, List pipelineAggregators, Map metaData) throws IOException { return new DateHistogramAggregator(name, factories, rounding, shardRounding, offset, order, keyed, minDocCount, extendedBounds, - valuesSource, config.format(), context, parent, pipelineAggregators, metaData); + valuesSource, config.format(), searchContext, parent, pipelineAggregators, metaData); } - private Aggregator createRangeAggregator(ValuesSource.Range valuesSource, Aggregator parent, + private Aggregator createRangeAggregator(ValuesSource.Range valuesSource, + SearchContext searchContext, + Aggregator parent, List pipelineAggregators, Map metaData) throws IOException { return new DateRangeHistogramAggregator(name, factories, rounding, shardRounding, offset, order, keyed, minDocCount, extendedBounds, - valuesSource, config.format(), context, parent, pipelineAggregators, metaData); + valuesSource, config.format(), searchContext, parent, pipelineAggregators, metaData); } @Override - protected Aggregator createUnmapped(Aggregator parent, List pipelineAggregators, Map metaData) - throws IOException { - return createAggregator(null, parent, pipelineAggregators, metaData); + protected Aggregator createUnmapped(SearchContext searchContext, + Aggregator parent, + List pipelineAggregators, + Map metaData) throws IOException { + return createAggregator(null, searchContext, parent, pipelineAggregators, metaData); } } diff --git 
a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ExtendedBounds.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ExtendedBounds.java index b0dfbb9d66e..dc20ff291e0 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ExtendedBounds.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ExtendedBounds.java @@ -31,9 +31,8 @@ import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentParser.Token; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.DocValueFormat; -import org.elasticsearch.search.SearchParseException; -import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; import java.util.Objects; @@ -148,20 +147,20 @@ public class ExtendedBounds implements ToXContentFragment, Writeable { /** * Parse the bounds and perform any delayed validation. Returns the result of the parsing. */ - ExtendedBounds parseAndValidate(String aggName, SearchContext context, DocValueFormat format) { + ExtendedBounds parseAndValidate(String aggName, QueryShardContext queryShardContext, DocValueFormat format) { Long min = this.min; Long max = this.max; assert format != null; if (minAsStr != null) { - min = format.parseLong(minAsStr, false, context.getQueryShardContext()::nowInMillis); + min = format.parseLong(minAsStr, false, queryShardContext::nowInMillis); } if (maxAsStr != null) { // TODO: Should we rather pass roundUp=true? - max = format.parseLong(maxAsStr, false, context.getQueryShardContext()::nowInMillis); + max = format.parseLong(maxAsStr, false, queryShardContext::nowInMillis); } if (min != null && max != null && min.compareTo(max) > 0) { - throw new SearchParseException(context, "[extended_bounds.min][" + min + "] cannot be greater than " + - "[extended_bounds.max][" + max + "] for histogram aggregation [" + aggName + "]", null); + throw new IllegalArgumentException("[extended_bounds.min][" + min + "] cannot be greater than " + + "[extended_bounds.max][" + max + "] for histogram aggregation [" + aggName + "]"); } return new ExtendedBounds(min, max, minAsStr, maxAsStr); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregationBuilder.java index 833e9e74904..90091b2697d 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregationBuilder.java @@ -25,6 +25,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.script.Script; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; @@ -40,7 +41,6 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFacto import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import 
org.elasticsearch.search.aggregations.support.ValuesSourceParserHelper; import org.elasticsearch.search.aggregations.support.ValuesSourceType; -import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; import java.util.List; @@ -302,10 +302,12 @@ public class HistogramAggregationBuilder extends ValuesSourceAggregationBuilder< } @Override - protected ValuesSourceAggregatorFactory innerBuild(SearchContext context, ValuesSourceConfig config, - AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException { + protected ValuesSourceAggregatorFactory innerBuild(QueryShardContext queryShardContext, + ValuesSourceConfig config, + AggregatorFactory parent, + Builder subFactoriesBuilder) throws IOException { return new HistogramAggregatorFactory(name, config, interval, offset, order, keyed, minDocCount, minBound, maxBound, - context, parent, subFactoriesBuilder, metaData); + queryShardContext, parent, subFactoriesBuilder, metaData); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregatorFactory.java index 6fac7e514be..670b8008bc5 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregatorFactory.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.aggregations.bucket.histogram; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; @@ -54,11 +55,20 @@ public final class HistogramAggregatorFactory extends ValuesSourceAggregatorFact + missing + "]"); } - public HistogramAggregatorFactory(String name, ValuesSourceConfig config, double interval, double offset, - BucketOrder order, boolean keyed, long minDocCount, double minBound, double maxBound, - SearchContext context, AggregatorFactory parent, - AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { - super(name, config, context, parent, subFactoriesBuilder, metaData); + public HistogramAggregatorFactory(String name, + ValuesSourceConfig config, + double interval, + double offset, + BucketOrder order, + boolean keyed, + long minDocCount, + double minBound, + double maxBound, + QueryShardContext queryShardContext, + AggregatorFactory parent, + AggregatorFactories.Builder subFactoriesBuilder, + Map metaData) throws IOException { + super(name, config, queryShardContext, parent, subFactoriesBuilder, metaData); this.interval = interval; this.offset = offset; this.order = order; @@ -73,14 +83,18 @@ public final class HistogramAggregatorFactory extends ValuesSourceAggregatorFact } @Override - protected Aggregator doCreateInternal(ValuesSource valuesSource, Aggregator parent, boolean collectsFromSingleBucket, - List pipelineAggregators, Map metaData) throws IOException { + protected Aggregator doCreateInternal(ValuesSource valuesSource, + SearchContext searchContext, + Aggregator parent, + boolean collectsFromSingleBucket, + List pipelineAggregators, + Map metaData) throws IOException { if (collectsFromSingleBucket == false) { - return asMultiBucketAggregator(this, context, parent); + return asMultiBucketAggregator(this, searchContext, parent); } if (valuesSource 
instanceof ValuesSource.Numeric) { return new NumericHistogramAggregator(name, factories, interval, offset, order, keyed, minDocCount, minBound, maxBound, - (ValuesSource.Numeric) valuesSource, config.format(), context, parent, pipelineAggregators, metaData); + (ValuesSource.Numeric) valuesSource, config.format(), searchContext, parent, pipelineAggregators, metaData); } else if (valuesSource instanceof ValuesSource.Range) { ValuesSource.Range rangeValueSource = (ValuesSource.Range) valuesSource; if (rangeValueSource.rangeType().isNumeric() == false) { @@ -88,7 +102,7 @@ public final class HistogramAggregatorFactory extends ValuesSourceAggregatorFact + rangeValueSource.rangeType().name + "]"); } return new RangeHistogramAggregator(name, factories, interval, offset, order, keyed, minDocCount, minBound, maxBound, - (ValuesSource.Range) valuesSource, config.format(), context, parent, pipelineAggregators, + (ValuesSource.Range) valuesSource, config.format(), searchContext, parent, pipelineAggregators, metaData); } else { @@ -98,9 +112,11 @@ public final class HistogramAggregatorFactory extends ValuesSourceAggregatorFact } @Override - protected Aggregator createUnmapped(Aggregator parent, List pipelineAggregators, Map metaData) - throws IOException { + protected Aggregator createUnmapped(SearchContext searchContext, + Aggregator parent, + List pipelineAggregators, + Map metaData) throws IOException { return new NumericHistogramAggregator(name, factories, interval, offset, order, keyed, minDocCount, minBound, maxBound, - null, config.format(), context, parent, pipelineAggregators, metaData); + null, config.format(), searchContext, parent, pipelineAggregators, metaData); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingAggregationBuilder.java index c0fd5f26eb5..c685df31ab1 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingAggregationBuilder.java @@ -25,6 +25,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; @@ -35,7 +36,6 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFacto import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import org.elasticsearch.search.aggregations.support.ValuesSourceParserHelper; import org.elasticsearch.search.aggregations.support.ValuesSourceType; -import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; import java.util.Map; @@ -84,9 +84,11 @@ public class MissingAggregationBuilder extends ValuesSourceAggregationBuilder innerBuild(SearchContext context, - ValuesSourceConfig config, AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException { - return new MissingAggregatorFactory(name, config, context, parent, subFactoriesBuilder, metaData); + protected ValuesSourceAggregatorFactory innerBuild(QueryShardContext queryShardContext, + 
ValuesSourceConfig config, + AggregatorFactory parent, + Builder subFactoriesBuilder) throws IOException { + return new MissingAggregatorFactory(name, config, queryShardContext, parent, subFactoriesBuilder, metaData); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingAggregatorFactory.java index 61705c773d1..6eee7739c45 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingAggregatorFactory.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.aggregations.bucket.missing; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; @@ -34,21 +35,27 @@ import java.util.Map; public class MissingAggregatorFactory extends ValuesSourceAggregatorFactory { - public MissingAggregatorFactory(String name, ValuesSourceConfig config, SearchContext context, + public MissingAggregatorFactory(String name, ValuesSourceConfig config, QueryShardContext queryShardContext, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { - super(name, config, context, parent, subFactoriesBuilder, metaData); + super(name, config, queryShardContext, parent, subFactoriesBuilder, metaData); } @Override - protected MissingAggregator createUnmapped(Aggregator parent, List pipelineAggregators, - Map metaData) throws IOException { - return new MissingAggregator(name, factories, null, context, parent, pipelineAggregators, metaData); + protected MissingAggregator createUnmapped(SearchContext searchContext, + Aggregator parent, + List pipelineAggregators, + Map metaData) throws IOException { + return new MissingAggregator(name, factories, null, searchContext, parent, pipelineAggregators, metaData); } @Override - protected MissingAggregator doCreateInternal(ValuesSource valuesSource, Aggregator parent, boolean collectsFromSingleBucket, - List pipelineAggregators, Map metaData) throws IOException { - return new MissingAggregator(name, factories, valuesSource, context, parent, pipelineAggregators, metaData); + protected MissingAggregator doCreateInternal(ValuesSource valuesSource, + SearchContext searchContext, + Aggregator parent, + boolean collectsFromSingleBucket, + List pipelineAggregators, + Map metaData) throws IOException { + return new MissingAggregator(name, factories, valuesSource, searchContext, parent, pipelineAggregators, metaData); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregationBuilder.java index e27b341003d..593a15326af 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregationBuilder.java @@ -25,12 +25,12 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.mapper.ObjectMapper; +import 
org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.aggregations.AbstractAggregationBuilder; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregationExecutionException; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; -import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; import java.util.Map; @@ -87,23 +87,25 @@ public class NestedAggregationBuilder extends AbstractAggregationBuilder metaData) throws IOException { - super(name, context, parent, subFactories, metaData); + QueryShardContext queryShardContext, AggregatorFactory parent, AggregatorFactories.Builder subFactories, + Map metaData) throws IOException { + super(name, queryShardContext, parent, subFactories, metaData); this.parentObjectMapper = parentObjectMapper; this.childObjectMapper = childObjectMapper; } @Override - public Aggregator createInternal(Aggregator parent, boolean collectsFromSingleBucket, List pipelineAggregators, - Map metaData) throws IOException { + public Aggregator createInternal(SearchContext searchContext, + Aggregator parent, + boolean collectsFromSingleBucket, + List pipelineAggregators, + Map metaData) throws IOException { if (childObjectMapper == null) { - return new Unmapped(name, context, parent, pipelineAggregators, metaData); + return new Unmapped(name, searchContext, parent, pipelineAggregators, metaData); } - return new NestedAggregator(name, factories, parentObjectMapper, childObjectMapper, context, parent, + return new NestedAggregator(name, factories, parentObjectMapper, childObjectMapper, searchContext, parent, pipelineAggregators, metaData, collectsFromSingleBucket); } private static final class Unmapped extends NonCollectingAggregator { - Unmapped(String name, SearchContext context, Aggregator parent, List pipelineAggregators, - Map metaData) throws IOException { + Unmapped(String name, + SearchContext context, + Aggregator parent, + List pipelineAggregators, + Map metaData) throws IOException { super(name, context, parent, pipelineAggregators, metaData); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregationBuilder.java index 62dfb401176..08c0d86469e 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregationBuilder.java @@ -25,14 +25,13 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.mapper.ObjectMapper; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.query.support.NestedScope; -import org.elasticsearch.search.SearchParseException; import org.elasticsearch.search.aggregations.AbstractAggregationBuilder; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregationExecutionException; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; -import org.elasticsearch.search.internal.SearchContext; import 
java.io.IOException; import java.util.Map; @@ -92,28 +91,27 @@ public class ReverseNestedAggregationBuilder extends AbstractAggregationBuilder< } @Override - protected AggregatorFactory doBuild(SearchContext context, AggregatorFactory parent, Builder subFactoriesBuilder) + protected AggregatorFactory doBuild(QueryShardContext queryShardContext, AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException { if (findNestedAggregatorFactory(parent) == null) { - throw new SearchParseException(context, - "Reverse nested aggregation [" + name + "] can only be used inside a [nested] aggregation", null); + throw new IllegalArgumentException("Reverse nested aggregation [" + name + "] can only be used inside a [nested] aggregation"); } ObjectMapper parentObjectMapper = null; if (path != null) { - parentObjectMapper = context.getObjectMapper(path); + parentObjectMapper = queryShardContext.getObjectMapper(path); if (parentObjectMapper == null) { - return new ReverseNestedAggregatorFactory(name, true, null, context, parent, subFactoriesBuilder, metaData); + return new ReverseNestedAggregatorFactory(name, true, null, queryShardContext, parent, subFactoriesBuilder, metaData); } if (parentObjectMapper.nested().isNested() == false) { throw new AggregationExecutionException("[reverse_nested] nested path [" + path + "] is not nested"); } } - NestedScope nestedScope = context.getQueryShardContext().nestedScope(); + NestedScope nestedScope = queryShardContext.nestedScope(); try { nestedScope.nextLevel(parentObjectMapper); - return new ReverseNestedAggregatorFactory(name, false, parentObjectMapper, context, parent, subFactoriesBuilder, + return new ReverseNestedAggregatorFactory(name, false, parentObjectMapper, queryShardContext, parent, subFactoriesBuilder, metaData); } finally { nestedScope.previousLevel(); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregatorFactory.java index 022a1cec771..86863c0852b 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregatorFactory.java @@ -20,6 +20,7 @@ package org.elasticsearch.search.aggregations.bucket.nested; import org.elasticsearch.index.mapper.ObjectMapper; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; @@ -38,28 +39,35 @@ public class ReverseNestedAggregatorFactory extends AggregatorFactory { private final ObjectMapper parentObjectMapper; public ReverseNestedAggregatorFactory(String name, boolean unmapped, ObjectMapper parentObjectMapper, - SearchContext context, AggregatorFactory parent, + QueryShardContext queryShardContext, AggregatorFactory parent, AggregatorFactories.Builder subFactories, Map metaData) throws IOException { - super(name, context, parent, subFactories, metaData); + super(name, queryShardContext, parent, subFactories, metaData); this.unmapped = unmapped; this.parentObjectMapper = parentObjectMapper; } @Override - public Aggregator createInternal(Aggregator parent, boolean collectsFromSingleBucket, List pipelineAggregators, - Map metaData) throws IOException { + public Aggregator createInternal(SearchContext 
searchContext, + Aggregator parent, + boolean collectsFromSingleBucket, + List pipelineAggregators, + Map metaData) throws IOException { if (unmapped) { - return new Unmapped(name, context, parent, pipelineAggregators, metaData); + return new Unmapped(name, searchContext, parent, pipelineAggregators, metaData); } else { - return new ReverseNestedAggregator(name, factories, parentObjectMapper, context, parent, pipelineAggregators, metaData); + return new ReverseNestedAggregator(name, factories, parentObjectMapper, + searchContext, parent, pipelineAggregators, metaData); } } private static final class Unmapped extends NonCollectingAggregator { - Unmapped(String name, SearchContext context, Aggregator parent, List pipelineAggregators, - Map metaData) throws IOException { + Unmapped(String name, + SearchContext context, + Aggregator parent, + List pipelineAggregators, + Map metaData) throws IOException { super(name, context, parent, pipelineAggregators, metaData); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/AbstractRangeAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/AbstractRangeAggregatorFactory.java index e1943e6af11..d60851a2d7f 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/AbstractRangeAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/AbstractRangeAggregatorFactory.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.aggregations.bucket.range; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; @@ -41,25 +42,37 @@ public class AbstractRangeAggregatorFactory extends ValuesSourc private final R[] ranges; private final boolean keyed; - public AbstractRangeAggregatorFactory(String name, ValuesSourceConfig config, R[] ranges, boolean keyed, - InternalRange.Factory rangeFactory, SearchContext context, AggregatorFactory parent, - AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { - super(name, config, context, parent, subFactoriesBuilder, metaData); + public AbstractRangeAggregatorFactory(String name, + ValuesSourceConfig config, + R[] ranges, + boolean keyed, + InternalRange.Factory rangeFactory, + QueryShardContext queryShardContext, + AggregatorFactory parent, + AggregatorFactories.Builder subFactoriesBuilder, + Map metaData) throws IOException { + super(name, config, queryShardContext, parent, subFactoriesBuilder, metaData); this.ranges = ranges; this.keyed = keyed; this.rangeFactory = rangeFactory; } @Override - protected Aggregator createUnmapped(Aggregator parent, List pipelineAggregators, Map metaData) - throws IOException { - return new Unmapped<>(name, ranges, keyed, config.format(), context, parent, rangeFactory, pipelineAggregators, metaData); + protected Aggregator createUnmapped(SearchContext searchContext, + Aggregator parent, + List pipelineAggregators, + Map metaData) throws IOException { + return new Unmapped<>(name, ranges, keyed, config.format(), searchContext, parent, rangeFactory, pipelineAggregators, metaData); } @Override - protected Aggregator doCreateInternal(ValuesSource.Numeric valuesSource, Aggregator parent, boolean collectsFromSingleBucket, - List pipelineAggregators, Map metaData) throws IOException { - return new RangeAggregator(name, factories, valuesSource, 
config.format(), rangeFactory, ranges, keyed, context, parent, + protected Aggregator doCreateInternal(Numeric valuesSource, + SearchContext searchContext, + Aggregator parent, + boolean collectsFromSingleBucket, + List pipelineAggregators, + Map metaData) throws IOException { + return new RangeAggregator(name, factories, valuesSource, config.format(), rangeFactory, ranges, keyed, searchContext, parent, pipelineAggregators, metaData); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/BinaryRangeAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/BinaryRangeAggregatorFactory.java index 41d30cc9bff..e11c8d201b4 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/BinaryRangeAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/BinaryRangeAggregatorFactory.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.search.aggregations.bucket.range; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; @@ -40,30 +41,30 @@ public class BinaryRangeAggregatorFactory public BinaryRangeAggregatorFactory(String name, ValuesSourceConfig config, List ranges, boolean keyed, - SearchContext context, + QueryShardContext queryShardContext, AggregatorFactory parent, Builder subFactoriesBuilder, Map metaData) throws IOException { - super(name, config, context, parent, subFactoriesBuilder, metaData); + super(name, config, queryShardContext, parent, subFactoriesBuilder, metaData); this.ranges = ranges; this.keyed = keyed; } @Override - protected Aggregator createUnmapped(Aggregator parent, - List pipelineAggregators, - Map metaData) throws IOException { + protected Aggregator createUnmapped(SearchContext searchContext, Aggregator parent, + List pipelineAggregators, + Map metaData) throws IOException { return new BinaryRangeAggregator(name, factories, null, config.format(), - ranges, keyed, context, parent, pipelineAggregators, metaData); + ranges, keyed, searchContext, parent, pipelineAggregators, metaData); } @Override protected Aggregator doCreateInternal(ValuesSource.Bytes valuesSource, - Aggregator parent, - boolean collectsFromSingleBucket, - List pipelineAggregators, - Map metaData) throws IOException { + SearchContext searchContext, Aggregator parent, + boolean collectsFromSingleBucket, + List pipelineAggregators, + Map metaData) throws IOException { return new BinaryRangeAggregator(name, factories, valuesSource, config.format(), - ranges, keyed, context, parent, pipelineAggregators, metaData); + ranges, keyed, searchContext, parent, pipelineAggregators, metaData); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/DateRangeAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/DateRangeAggregationBuilder.java index f0896fb682b..fcefc81b698 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/DateRangeAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/DateRangeAggregationBuilder.java @@ -22,6 +22,7 @@ package org.elasticsearch.search.aggregations.bucket.range; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.xcontent.ObjectParser; import 
org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; @@ -29,7 +30,6 @@ import org.elasticsearch.search.aggregations.AggregatorFactory; import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric; import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import org.elasticsearch.search.aggregations.support.ValuesSourceParserHelper; -import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; import java.time.ZonedDateTime; @@ -288,8 +288,8 @@ public class DateRangeAggregationBuilder extends AbstractRangeBuilder config, - AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException { + protected DateRangeAggregatorFactory innerBuild(QueryShardContext queryShardContext, ValuesSourceConfig config, + AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException { // We need to call processRanges here so they are parsed and we know whether `now` has been used before we make // the decision of whether to cache the request RangeAggregator.Range[] ranges = processRanges(range -> { @@ -300,23 +300,23 @@ public class DateRangeAggregationBuilder extends AbstractRangeBuilder { - public DateRangeAggregatorFactory(String name, ValuesSourceConfig config, RangeAggregator.Range[] ranges, boolean keyed, - InternalRange.Factory rangeFactory, SearchContext context, AggregatorFactory parent, - AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { - super(name, config, ranges, keyed, rangeFactory, context, parent, subFactoriesBuilder, metaData); + public DateRangeAggregatorFactory(String name, + ValuesSourceConfig config, + RangeAggregator.Range[] ranges, + boolean keyed, + InternalRange.Factory rangeFactory, + QueryShardContext queryShardContext, + AggregatorFactory parent, + AggregatorFactories.Builder subFactoriesBuilder, + Map metaData) throws IOException { + super(name, config, ranges, keyed, rangeFactory, queryShardContext, parent, subFactoriesBuilder, metaData); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/GeoDistanceAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/GeoDistanceAggregationBuilder.java index d9e29d0df46..92d8bbc15a7 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/GeoDistanceAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/GeoDistanceAggregationBuilder.java @@ -31,6 +31,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentParser.Token; import org.elasticsearch.common.xcontent.XContentParserUtils; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; @@ -39,7 +40,6 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuil import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import 
org.elasticsearch.search.aggregations.support.ValuesSourceParserHelper; -import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; import java.util.ArrayList; @@ -411,14 +411,15 @@ public class GeoDistanceAggregationBuilder extends ValuesSourceAggregationBuilde } @Override - protected ValuesSourceAggregatorFactory innerBuild(SearchContext context, - ValuesSourceConfig config, AggregatorFactory parent, Builder subFactoriesBuilder) - throws IOException { + protected ValuesSourceAggregatorFactory innerBuild(QueryShardContext queryShardContext, + ValuesSourceConfig config, + AggregatorFactory parent, + Builder subFactoriesBuilder) throws IOException { Range[] ranges = this.ranges.toArray(new Range[this.range().size()]); if (ranges.length == 0) { throw new IllegalArgumentException("No [ranges] specified for the [" + this.getName() + "] aggregation"); } - return new GeoDistanceRangeAggregatorFactory(name, config, origin, ranges, unit, distanceType, keyed, context, parent, + return new GeoDistanceRangeAggregatorFactory(name, config, origin, ranges, unit, distanceType, keyed, queryShardContext, parent, subFactoriesBuilder, metaData); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/GeoDistanceRangeAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/GeoDistanceRangeAggregatorFactory.java index b99ae657aae..711297762b8 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/GeoDistanceRangeAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/GeoDistanceRangeAggregatorFactory.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.unit.DistanceUnit; import org.elasticsearch.index.fielddata.MultiGeoPointValues; import org.elasticsearch.index.fielddata.SortedBinaryDocValues; import org.elasticsearch.index.fielddata.SortedNumericDoubleValues; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; @@ -53,9 +54,9 @@ public class GeoDistanceRangeAggregatorFactory private final boolean keyed; public GeoDistanceRangeAggregatorFactory(String name, ValuesSourceConfig config, GeoPoint origin, - Range[] ranges, DistanceUnit unit, GeoDistance distanceType, boolean keyed, SearchContext context, + Range[] ranges, DistanceUnit unit, GeoDistance distanceType, boolean keyed, QueryShardContext queryShardContext, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { - super(name, config, context, parent, subFactoriesBuilder, metaData); + super(name, config, queryShardContext, parent, subFactoriesBuilder, metaData); this.origin = origin; this.ranges = ranges; this.unit = unit; @@ -64,17 +65,23 @@ public class GeoDistanceRangeAggregatorFactory } @Override - protected Aggregator createUnmapped(Aggregator parent, List pipelineAggregators, Map metaData) - throws IOException { - return new RangeAggregator.Unmapped<>(name, ranges, keyed, config.format(), context, parent, rangeFactory, pipelineAggregators, - metaData); + protected Aggregator createUnmapped(SearchContext searchContext, + Aggregator parent, + List pipelineAggregators, + Map metaData) throws IOException { + return new RangeAggregator.Unmapped<>(name, ranges, keyed, config.format(), searchContext, parent, + rangeFactory, pipelineAggregators, 
metaData); } @Override - protected Aggregator doCreateInternal(final ValuesSource.GeoPoint valuesSource, Aggregator parent, boolean collectsFromSingleBucket, - List pipelineAggregators, Map metaData) throws IOException { + protected Aggregator doCreateInternal(final ValuesSource.GeoPoint valuesSource, + SearchContext searchContext, + Aggregator parent, + boolean collectsFromSingleBucket, + List pipelineAggregators, + Map metaData) throws IOException { DistanceSource distanceSource = new DistanceSource(valuesSource, distanceType, origin, unit); - return new RangeAggregator(name, factories, distanceSource, config.format(), rangeFactory, ranges, keyed, context, + return new RangeAggregator(name, factories, distanceSource, config.format(), rangeFactory, ranges, keyed, searchContext, parent, pipelineAggregators, metaData); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/IpRangeAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/IpRangeAggregationBuilder.java index 9ee76784d20..14a8eb3a1cd 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/IpRangeAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/IpRangeAggregationBuilder.java @@ -31,6 +31,7 @@ import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentParser.Token; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.script.Script; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; @@ -42,7 +43,6 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFacto import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import org.elasticsearch.search.aggregations.support.ValuesSourceParserHelper; import org.elasticsearch.search.aggregations.support.ValuesSourceType; -import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; import java.net.InetAddress; @@ -365,8 +365,8 @@ public final class IpRangeAggregationBuilder @Override protected ValuesSourceAggregatorFactory innerBuild( - SearchContext context, ValuesSourceConfig config, - AggregatorFactory parent, Builder subFactoriesBuilder) + QueryShardContext queryShardContext, ValuesSourceConfig config, + AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException { List ranges = new ArrayList<>(); if(this.ranges.size() == 0){ @@ -376,7 +376,7 @@ public final class IpRangeAggregationBuilder ranges.add(new BinaryRangeAggregator.Range(range.key, toBytesRef(range.from), toBytesRef(range.to))); } return new BinaryRangeAggregatorFactory(name, config, ranges, - keyed, context, parent, subFactoriesBuilder, metaData); + keyed, queryShardContext, parent, subFactoriesBuilder, metaData); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregationBuilder.java index 25c072a7a06..6a954472877 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregationBuilder.java @@ -22,6 +22,7 @@ package 
org.elasticsearch.search.aggregations.bucket.range; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; @@ -30,7 +31,6 @@ import org.elasticsearch.search.aggregations.bucket.range.RangeAggregator.Range; import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric; import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import org.elasticsearch.search.aggregations.support.ValuesSourceParserHelper; -import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; import java.util.Map; @@ -146,8 +146,8 @@ public class RangeAggregationBuilder extends AbstractRangeBuilder config, - AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException { + protected RangeAggregatorFactory innerBuild(QueryShardContext queryShardContext, ValuesSourceConfig config, + AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException { // We need to call processRanges here so they are parsed before we make the decision of whether to cache the request Range[] ranges = processRanges(range -> { DocValueFormat parser = config.format(); @@ -155,17 +155,17 @@ public class RangeAggregationBuilder extends AbstractRangeBuilder { public RangeAggregatorFactory(String name, ValuesSourceConfig config, Range[] ranges, boolean keyed, - Factory rangeFactory, SearchContext context, AggregatorFactory parent, - AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { - super(name, config, ranges, keyed, rangeFactory, context, parent, subFactoriesBuilder, metaData); + Factory rangeFactory, QueryShardContext queryShardContext, AggregatorFactory parent, + AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { + super(name, config, ranges, keyed, rangeFactory, queryShardContext, parent, subFactoriesBuilder, metaData); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedAggregationBuilder.java index d4d7d4f8b01..eb7012ea69f 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedAggregationBuilder.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; @@ -33,7 +34,6 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFacto import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import org.elasticsearch.search.aggregations.support.ValuesSourceParserHelper; import org.elasticsearch.search.aggregations.support.ValuesSourceType; -import 
org.elasticsearch.search.internal.SearchContext; import java.io.IOException; import java.util.Map; @@ -148,9 +148,11 @@ public class DiversifiedAggregationBuilder extends ValuesSourceAggregationBuilde } @Override - protected ValuesSourceAggregatorFactory innerBuild(SearchContext context, - ValuesSourceConfig config, AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException { - return new DiversifiedAggregatorFactory(name, config, shardSize, maxDocsPerValue, executionHint, context, parent, + protected ValuesSourceAggregatorFactory innerBuild(QueryShardContext queryShardContext, + ValuesSourceConfig config, + AggregatorFactory parent, + Builder subFactoriesBuilder) throws IOException { + return new DiversifiedAggregatorFactory(name, config, shardSize, maxDocsPerValue, executionHint, queryShardContext, parent, subFactoriesBuilder, metaData); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedAggregatorFactory.java index 281e6b0e29d..84ee57ccb08 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedAggregatorFactory.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.aggregations.bucket.sampler; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.aggregations.AggregationExecutionException; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; @@ -44,20 +45,24 @@ public class DiversifiedAggregatorFactory extends ValuesSourceAggregatorFactory< private final String executionHint; DiversifiedAggregatorFactory(String name, ValuesSourceConfig config, int shardSize, int maxDocsPerValue, - String executionHint, SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, - Map metaData) throws IOException { - super(name, config, context, parent, subFactoriesBuilder, metaData); + String executionHint, QueryShardContext queryShardContext, AggregatorFactory parent, + AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { + super(name, config, queryShardContext, parent, subFactoriesBuilder, metaData); this.shardSize = shardSize; this.maxDocsPerValue = maxDocsPerValue; this.executionHint = executionHint; } @Override - protected Aggregator doCreateInternal(ValuesSource valuesSource, Aggregator parent, boolean collectsFromSingleBucket, - List pipelineAggregators, Map metaData) throws IOException { + protected Aggregator doCreateInternal(ValuesSource valuesSource, + SearchContext searchContext, + Aggregator parent, + boolean collectsFromSingleBucket, + List pipelineAggregators, + Map metaData) throws IOException { if (valuesSource instanceof ValuesSource.Numeric) { - return new DiversifiedNumericSamplerAggregator(name, shardSize, factories, context, parent, pipelineAggregators, metaData, + return new DiversifiedNumericSamplerAggregator(name, shardSize, factories, searchContext, parent, pipelineAggregators, metaData, (Numeric) valuesSource, maxDocsPerValue); } @@ -75,7 +80,7 @@ public class DiversifiedAggregatorFactory extends ValuesSourceAggregatorFactory< if ((execution.needsGlobalOrdinals()) && (!(valuesSource instanceof ValuesSource.Bytes.WithOrdinals))) { execution = ExecutionMode.MAP; } - return 
execution.create(name, factories, shardSize, maxDocsPerValue, valuesSource, context, parent, pipelineAggregators, + return execution.create(name, factories, shardSize, maxDocsPerValue, valuesSource, searchContext, parent, pipelineAggregators, metaData); } @@ -84,11 +89,13 @@ public class DiversifiedAggregatorFactory extends ValuesSourceAggregatorFactory< } @Override - protected Aggregator createUnmapped(Aggregator parent, List pipelineAggregators, Map metaData) - throws IOException { + protected Aggregator createUnmapped(SearchContext searchContext, + Aggregator parent, + List pipelineAggregators, + Map metaData) throws IOException { final UnmappedSampler aggregation = new UnmappedSampler(name, pipelineAggregators, metaData); - return new NonCollectingAggregator(name, context, parent, factories, pipelineAggregators, metaData) { + return new NonCollectingAggregator(name, searchContext, parent, factories, pipelineAggregators, metaData) { @Override public InternalAggregation buildEmptyAggregation() { return aggregation; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregationBuilder.java index afdb3e5e48c..666d0a424a0 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregationBuilder.java @@ -24,11 +24,11 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.aggregations.AbstractAggregationBuilder; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; -import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; import java.util.Map; @@ -84,9 +84,9 @@ public class SamplerAggregationBuilder extends AbstractAggregationBuilder metaData) throws IOException { - super(name, context, parent, subFactories, metaData); + SamplerAggregatorFactory(String name, int shardSize, QueryShardContext queryShardContext, AggregatorFactory parent, + AggregatorFactories.Builder subFactories, Map metaData) throws IOException { + super(name, queryShardContext, parent, subFactories, metaData); this.shardSize = shardSize; } @Override - public Aggregator createInternal(Aggregator parent, boolean collectsFromSingleBucket, List pipelineAggregators, - Map metaData) throws IOException { - return new SamplerAggregator(name, shardSize, factories, context, parent, pipelineAggregators, metaData); + public Aggregator createInternal(SearchContext searchContext, + Aggregator parent, + boolean collectsFromSingleBucket, + List pipelineAggregators, + Map metaData) throws IOException { + return new SamplerAggregator(name, shardSize, factories, searchContext, parent, pipelineAggregators, metaData); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregationBuilder.java index dab9cf34dbb..1eda817b494 100644 --- 
a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregationBuilder.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.xcontent.ParseFieldRegistry; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; @@ -46,7 +47,6 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFacto import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import org.elasticsearch.search.aggregations.support.ValuesSourceParserHelper; import org.elasticsearch.search.aggregations.support.ValuesSourceType; -import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; import java.util.Map; @@ -284,11 +284,13 @@ public class SignificantTermsAggregationBuilder extends ValuesSourceAggregationB } @Override - protected ValuesSourceAggregatorFactory innerBuild(SearchContext context, ValuesSourceConfig config, - AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException { - SignificanceHeuristic executionHeuristic = this.significanceHeuristic.rewrite(context); + protected ValuesSourceAggregatorFactory innerBuild(QueryShardContext queryShardContext, + ValuesSourceConfig config, + AggregatorFactory parent, + Builder subFactoriesBuilder) throws IOException { + SignificanceHeuristic executionHeuristic = this.significanceHeuristic.rewrite(queryShardContext); return new SignificantTermsAggregatorFactory(name, config, includeExclude, executionHint, filterBuilder, - bucketCountThresholds, executionHeuristic, context, parent, subFactoriesBuilder, metaData); + bucketCountThresholds, executionHeuristic, queryShardContext, parent, subFactoriesBuilder, metaData); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregatorFactory.java index dbe8873f88a..0687c81a648 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregatorFactory.java @@ -36,6 +36,7 @@ import org.elasticsearch.common.lucene.index.FilterableTermsEnum; import org.elasticsearch.common.lucene.index.FreqTermsEnum; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.AggregationExecutionException; import org.elasticsearch.search.aggregations.Aggregator; @@ -81,11 +82,11 @@ public class SignificantTermsAggregatorFactory extends ValuesSourceAggregatorFac QueryBuilder filterBuilder, TermsAggregator.BucketCountThresholds bucketCountThresholds, SignificanceHeuristic significanceHeuristic, - SearchContext context, + QueryShardContext queryShardContext, AggregatorFactory parent, AggregatorFactories.Builder 
subFactoriesBuilder, Map metaData) throws IOException { - super(name, config, context, parent, subFactoriesBuilder, metaData); + super(name, config, queryShardContext, parent, subFactoriesBuilder, metaData); if (!config.unmapped()) { this.fieldType = config.fieldContext().fieldType(); @@ -96,8 +97,8 @@ public class SignificantTermsAggregatorFactory extends ValuesSourceAggregatorFac this.executionHint = executionHint; this.filter = filterBuilder == null ? null - : filterBuilder.toQuery(context.getQueryShardContext()); - IndexSearcher searcher = context.searcher(); + : filterBuilder.toQuery(queryShardContext); + IndexSearcher searcher = queryShardContext.searcher(); this.supersetNumDocs = filter == null // Important - need to use the doc count that includes deleted docs // or we have this issue: https://github.com/elastic/elasticsearch/issues/7951 @@ -118,9 +119,9 @@ public class SignificantTermsAggregatorFactory extends ValuesSourceAggregatorFac if (termsEnum != null) { return termsEnum; } - IndexReader reader = context.searcher().getIndexReader(); + IndexReader reader = queryShardContext.getIndexReader(); if (numberOfAggregatorsCreated > 1) { - termsEnum = new FreqTermsEnum(reader, field, true, false, filter, context.bigArrays()); + termsEnum = new FreqTermsEnum(reader, field, true, false, filter, queryShardContext.bigArrays()); } else { termsEnum = new FilterableTermsEnum(reader, indexedFieldName, PostingsEnum.NONE, filter); } @@ -128,7 +129,7 @@ public class SignificantTermsAggregatorFactory extends ValuesSourceAggregatorFac } private long getBackgroundFrequency(String value) throws IOException { - Query query = fieldType.termQuery(value, context.getQueryShardContext()); + Query query = fieldType.termQuery(value, queryShardContext); if (query instanceof TermQuery) { // for types that use the inverted index, we prefer using a caching terms // enum that will do a better job at reusing index inputs @@ -147,7 +148,7 @@ public class SignificantTermsAggregatorFactory extends ValuesSourceAggregatorFac .add(filter, Occur.FILTER) .build(); } - return context.searcher().count(query); + return queryShardContext.searcher().count(query); } public long getBackgroundFrequency(BytesRef termBytes) throws IOException { @@ -161,11 +162,13 @@ public class SignificantTermsAggregatorFactory extends ValuesSourceAggregatorFac } @Override - protected Aggregator createUnmapped(Aggregator parent, List pipelineAggregators, Map metaData) - throws IOException { + protected Aggregator createUnmapped(SearchContext searchContext, + Aggregator parent, + List pipelineAggregators, + Map metaData) throws IOException { final InternalAggregation aggregation = new UnmappedSignificantTerms(name, bucketCountThresholds.getRequiredSize(), bucketCountThresholds.getMinDocCount(), pipelineAggregators, metaData); - return new NonCollectingAggregator(name, context, parent, pipelineAggregators, metaData) { + return new NonCollectingAggregator(name, searchContext, parent, pipelineAggregators, metaData) { @Override public InternalAggregation buildEmptyAggregation() { return aggregation; @@ -174,10 +177,14 @@ public class SignificantTermsAggregatorFactory extends ValuesSourceAggregatorFac } @Override - protected Aggregator doCreateInternal(ValuesSource valuesSource, Aggregator parent, boolean collectsFromSingleBucket, - List pipelineAggregators, Map metaData) throws IOException { + protected Aggregator doCreateInternal(ValuesSource valuesSource, + SearchContext searchContext, + Aggregator parent, + boolean collectsFromSingleBucket, + List 
pipelineAggregators, + Map metaData) throws IOException { if (collectsFromSingleBucket == false) { - return asMultiBucketAggregator(this, context, parent); + return asMultiBucketAggregator(this, searchContext, parent); } numberOfAggregatorsCreated++; @@ -218,7 +225,7 @@ public class SignificantTermsAggregatorFactory extends ValuesSourceAggregatorFac + "include/exclude clauses"); } - return execution.create(name, factories, valuesSource, format, bucketCountThresholds, includeExclude, context, parent, + return execution.create(name, factories, valuesSource, format, bucketCountThresholds, includeExclude, searchContext, parent, significanceHeuristic, this, pipelineAggregators, metaData); } @@ -238,7 +245,7 @@ public class SignificantTermsAggregatorFactory extends ValuesSourceAggregatorFac longFilter = includeExclude.convertToLongFilter(config.format()); } return new SignificantLongTermsAggregator(name, factories, (ValuesSource.Numeric) valuesSource, config.format(), - bucketCountThresholds, context, parent, significanceHeuristic, this, longFilter, pipelineAggregators, + bucketCountThresholds, searchContext, parent, significanceHeuristic, this, longFilter, pipelineAggregators, metaData); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTextAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTextAggregationBuilder.java index 7d53776e4fe..d2e3729c335 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTextAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTextAggregationBuilder.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.AbstractQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.aggregations.AbstractAggregationBuilder; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregationInitializationException; @@ -40,7 +41,6 @@ import org.elasticsearch.search.aggregations.bucket.terms.IncludeExclude; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregator; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregator.BucketCountThresholds; -import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; import java.util.Arrays; @@ -340,12 +340,12 @@ public class SignificantTextAggregationBuilder extends AbstractAggregationBuilde } @Override - protected AggregatorFactory doBuild(SearchContext context, AggregatorFactory parent, - Builder subFactoriesBuilder) throws IOException { - SignificanceHeuristic executionHeuristic = this.significanceHeuristic.rewrite(context); + protected AggregatorFactory doBuild(QueryShardContext queryShardContext, AggregatorFactory parent, + Builder subFactoriesBuilder) throws IOException { + SignificanceHeuristic executionHeuristic = this.significanceHeuristic.rewrite(queryShardContext); return new SignificantTextAggregatorFactory(name, includeExclude, filterBuilder, - bucketCountThresholds, executionHeuristic, context, parent, subFactoriesBuilder, + bucketCountThresholds, executionHeuristic, queryShardContext, parent, 
subFactoriesBuilder, fieldName, sourceFieldNames, filterDuplicateText, metaData); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTextAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTextAggregatorFactory.java index eb33c6dbc44..4930c213443 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTextAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTextAggregatorFactory.java @@ -33,6 +33,7 @@ import org.elasticsearch.common.lucene.index.FilterableTermsEnum; import org.elasticsearch.common.lucene.index.FreqTermsEnum; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; @@ -65,16 +66,23 @@ public class SignificantTextAggregatorFactory extends AggregatorFactory private final DocValueFormat format = DocValueFormat.RAW; private final boolean filterDuplicateText; - public SignificantTextAggregatorFactory(String name, IncludeExclude includeExclude, - QueryBuilder filterBuilder, TermsAggregator.BucketCountThresholds bucketCountThresholds, - SignificanceHeuristic significanceHeuristic, SearchContext context, AggregatorFactory parent, - AggregatorFactories.Builder subFactoriesBuilder, String fieldName, String [] sourceFieldNames, - boolean filterDuplicateText, Map metaData) throws IOException { - super(name, context, parent, subFactoriesBuilder, metaData); + public SignificantTextAggregatorFactory(String name, + IncludeExclude includeExclude, + QueryBuilder filterBuilder, + TermsAggregator.BucketCountThresholds bucketCountThresholds, + SignificanceHeuristic significanceHeuristic, + QueryShardContext queryShardContext, + AggregatorFactory parent, + AggregatorFactories.Builder subFactoriesBuilder, + String fieldName, + String [] sourceFieldNames, + boolean filterDuplicateText, + Map metaData) throws IOException { + super(name, queryShardContext, parent, subFactoriesBuilder, metaData); // Note that if the field is unmapped (its field type is null), we don't fail, // and just use the given field name as a placeholder. - this.fieldType = context.getQueryShardContext().fieldMapper(fieldName); + this.fieldType = queryShardContext.fieldMapper(fieldName); this.indexedFieldName = fieldType != null ? fieldType.name() : fieldName; this.sourceFieldNames = sourceFieldNames == null ? new String[] { indexedFieldName } @@ -83,9 +91,9 @@ public class SignificantTextAggregatorFactory extends AggregatorFactory this.includeExclude = includeExclude; this.filter = filterBuilder == null ? 
null - : filterBuilder.toQuery(context.getQueryShardContext()); + : filterBuilder.toQuery(queryShardContext); this.filterDuplicateText = filterDuplicateText; - IndexSearcher searcher = context.searcher(); + IndexSearcher searcher = queryShardContext.searcher(); // Important - need to use the doc count that includes deleted docs // or we have this issue: https://github.com/elastic/elasticsearch/issues/7951 this.supersetNumDocs = filter == null @@ -106,9 +114,9 @@ public class SignificantTextAggregatorFactory extends AggregatorFactory if (termsEnum != null) { return termsEnum; } - IndexReader reader = context.searcher().getIndexReader(); + IndexReader reader = queryShardContext.getIndexReader(); if (numberOfAggregatorsCreated > 1) { - termsEnum = new FreqTermsEnum(reader, field, true, false, filter, context.bigArrays()); + termsEnum = new FreqTermsEnum(reader, field, true, false, filter, queryShardContext.bigArrays()); } else { termsEnum = new FilterableTermsEnum(reader, indexedFieldName, PostingsEnum.NONE, filter); } @@ -116,7 +124,7 @@ public class SignificantTextAggregatorFactory extends AggregatorFactory } private long getBackgroundFrequency(String value) throws IOException { - Query query = fieldType.termQuery(value, context.getQueryShardContext()); + Query query = fieldType.termQuery(value, queryShardContext); if (query instanceof TermQuery) { // for types that use the inverted index, we prefer using a caching terms // enum that will do a better job at reusing index inputs @@ -135,7 +143,7 @@ public class SignificantTextAggregatorFactory extends AggregatorFactory .add(filter, Occur.FILTER) .build(); } - return context.searcher().count(query); + return queryShardContext.searcher().count(query); } public long getBackgroundFrequency(BytesRef termBytes) throws IOException { @@ -156,11 +164,11 @@ public class SignificantTextAggregatorFactory extends AggregatorFactory } @Override - protected Aggregator createInternal(Aggregator parent, boolean collectsFromSingleBucket, - List pipelineAggregators, Map metaData) + protected Aggregator createInternal(SearchContext searchContext, Aggregator parent, boolean collectsFromSingleBucket, + List pipelineAggregators, Map metaData) throws IOException { if (collectsFromSingleBucket == false) { - return asMultiBucketAggregator(this, context, parent); + return asMultiBucketAggregator(this, searchContext, parent); } numberOfAggregatorsCreated++; @@ -183,7 +191,7 @@ public class SignificantTextAggregatorFactory extends AggregatorFactory IncludeExclude.StringFilter incExcFilter = includeExclude == null ? 
null: includeExclude.convertToStringFilter(DocValueFormat.RAW); - return new SignificantTextAggregator(name, factories, context, parent, pipelineAggregators, bucketCountThresholds, + return new SignificantTextAggregator(name, factories, searchContext, parent, pipelineAggregators, bucketCountThresholds, incExcFilter, significanceHeuristic, this, indexedFieldName, sourceFieldNames, filterDuplicateText, metaData); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/ScriptHeuristic.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/ScriptHeuristic.java index 3d142742b5c..50ef203880d 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/ScriptHeuristic.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/ScriptHeuristic.java @@ -31,7 +31,6 @@ import org.elasticsearch.index.query.QueryShardException; import org.elasticsearch.script.Script; import org.elasticsearch.script.SignificantTermsHeuristicScoreScript; import org.elasticsearch.search.aggregations.InternalAggregation; -import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; import java.util.HashMap; @@ -101,9 +100,8 @@ public class ScriptHeuristic extends SignificanceHeuristic { } @Override - public SignificanceHeuristic rewrite(SearchContext context) { - QueryShardContext shardContext = context.getQueryShardContext(); - SignificantTermsHeuristicScoreScript.Factory compiledScript = shardContext.getScriptService().compile(script, + public SignificanceHeuristic rewrite(QueryShardContext queryShardContext) { + SignificantTermsHeuristicScoreScript.Factory compiledScript = queryShardContext.getScriptService().compile(script, SignificantTermsHeuristicScoreScript.CONTEXT); return new ExecutableScriptHeuristic(script, compiledScript.newInstance()); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/SignificanceHeuristic.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/SignificanceHeuristic.java index def7c0234b2..db24244d236 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/SignificanceHeuristic.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/SignificanceHeuristic.java @@ -21,9 +21,9 @@ package org.elasticsearch.search.aggregations.bucket.significant.heuristics; import org.elasticsearch.common.io.stream.NamedWriteable; import org.elasticsearch.common.xcontent.ToXContentFragment; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.bucket.significant.SignificantTerms; -import org.elasticsearch.search.internal.SearchContext; /** * Heuristic for that {@link SignificantTerms} uses to pick out significant terms. @@ -65,10 +65,10 @@ public abstract class SignificanceHeuristic implements NamedWriteable, ToXConten /** * Provides a hook for subclasses to provide a version of the heuristic * prepared for execution on data on a shard. 
- * @param context the search context on the data node + * @param queryShardContext the shard context on the data node * @return a version of this heuristic suitable for execution */ - public SignificanceHeuristic rewrite(SearchContext context) { + public SignificanceHeuristic rewrite(QueryShardContext queryShardContext) { return this; } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/RareTermsAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/RareTermsAggregationBuilder.java index f22eaf4d28a..44c24e32482 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/RareTermsAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/RareTermsAggregationBuilder.java @@ -25,6 +25,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; @@ -35,7 +36,6 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFacto import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import org.elasticsearch.search.aggregations.support.ValuesSourceParserHelper; import org.elasticsearch.search.aggregations.support.ValuesSourceType; -import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; import java.util.Map; @@ -162,12 +162,12 @@ public class RareTermsAggregationBuilder extends ValuesSourceAggregationBuilder< } @Override - protected ValuesSourceAggregatorFactory innerBuild(SearchContext context, - ValuesSourceConfig config, - AggregatorFactory parent, - Builder subFactoriesBuilder) throws IOException { + protected ValuesSourceAggregatorFactory innerBuild(QueryShardContext queryShardContext, + ValuesSourceConfig config, + AggregatorFactory parent, + Builder subFactoriesBuilder) throws IOException { return new RareTermsAggregatorFactory(name, config, includeExclude, - context, parent, subFactoriesBuilder, metaData, maxDocCount, precision); + queryShardContext, parent, subFactoriesBuilder, metaData, maxDocCount, precision); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/RareTermsAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/RareTermsAggregatorFactory.java index ab4c4bc0084..e30868b3440 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/RareTermsAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/RareTermsAggregatorFactory.java @@ -21,6 +21,7 @@ package org.elasticsearch.search.aggregations.bucket.terms; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.AggregationExecutionException; import org.elasticsearch.search.aggregations.Aggregator; @@ -45,20 +46,22 @@ public class RareTermsAggregatorFactory extends ValuesSourceAggregatorFactory config, IncludeExclude includeExclude, - SearchContext 
context, + QueryShardContext queryShardContext, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData, int maxDocCount, double precision) throws IOException { - super(name, config, context, parent, subFactoriesBuilder, metaData); + super(name, config, queryShardContext, parent, subFactoriesBuilder, metaData); this.includeExclude = includeExclude; this.maxDocCount = maxDocCount; this.precision = precision; } @Override - protected Aggregator createUnmapped(Aggregator parent, List pipelineAggregators, Map metaData) - throws IOException { + protected Aggregator createUnmapped(SearchContext searchContext, + Aggregator parent, + List pipelineAggregators, + Map metaData) throws IOException { final InternalAggregation aggregation = new UnmappedRareTerms(name, pipelineAggregators, metaData); - return new NonCollectingAggregator(name, context, parent, factories, pipelineAggregators, metaData) { + return new NonCollectingAggregator(name, searchContext, parent, factories, pipelineAggregators, metaData) { @Override public InternalAggregation buildEmptyAggregation() { return aggregation; @@ -67,10 +70,14 @@ public class RareTermsAggregatorFactory extends ValuesSourceAggregatorFactory pipelineAggregators, Map metaData) throws IOException { + protected Aggregator doCreateInternal(ValuesSource valuesSource, + SearchContext searchContext, + Aggregator parent, + boolean collectsFromSingleBucket, + List pipelineAggregators, + Map metaData) throws IOException { if (collectsFromSingleBucket == false) { - return asMultiBucketAggregator(this, context, parent); + return asMultiBucketAggregator(this, searchContext, parent); } if (valuesSource instanceof ValuesSource.Bytes) { ExecutionMode execution = ExecutionMode.MAP; //TODO global ords not implemented yet, only supports "map" @@ -83,7 +90,7 @@ public class RareTermsAggregatorFactory extends ValuesSourceAggregatorFactory innerBuild(SearchContext context, ValuesSourceConfig config, - AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException { + protected ValuesSourceAggregatorFactory innerBuild(QueryShardContext queryShardContext, + ValuesSourceConfig config, + AggregatorFactory parent, + Builder subFactoriesBuilder) throws IOException { return new TermsAggregatorFactory(name, config, order, includeExclude, executionHint, collectMode, - bucketCountThresholds, showTermDocCountError, context, parent, subFactoriesBuilder, metaData); + bucketCountThresholds, showTermDocCountError, queryShardContext, parent, subFactoriesBuilder, metaData); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java index 96daeeb476a..340b868f448 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java @@ -23,6 +23,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.lucene.search.IndexSearcher; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.AggregationExecutionException; import org.elasticsearch.search.aggregations.Aggregator; @@ -66,11 +67,11 @@ public class TermsAggregatorFactory extends 
ValuesSourceAggregatorFactory metaData) throws IOException { - super(name, config, context, parent, subFactoriesBuilder, metaData); + super(name, config, queryShardContext, parent, subFactoriesBuilder, metaData); this.order = order; this.includeExclude = includeExclude; this.executionHint = executionHint; @@ -80,11 +81,13 @@ public class TermsAggregatorFactory extends ValuesSourceAggregatorFactory pipelineAggregators, Map metaData) - throws IOException { + protected Aggregator createUnmapped(SearchContext searchContext, + Aggregator parent, + List pipelineAggregators, + Map metaData) throws IOException { final InternalAggregation aggregation = new UnmappedTerms(name, order, bucketCountThresholds.getRequiredSize(), bucketCountThresholds.getMinDocCount(), pipelineAggregators, metaData); - return new NonCollectingAggregator(name, context, parent, factories, pipelineAggregators, metaData) { + return new NonCollectingAggregator(name, searchContext, parent, factories, pipelineAggregators, metaData) { { // even in the case of an unmapped aggregator, validate the // order @@ -110,10 +113,14 @@ public class TermsAggregatorFactory extends ValuesSourceAggregatorFactory pipelineAggregators, Map metaData) throws IOException { + protected Aggregator doCreateInternal(ValuesSource valuesSource, + SearchContext searchContext, + Aggregator parent, + boolean collectsFromSingleBucket, + List pipelineAggregators, + Map metaData) throws IOException { if (collectsFromSingleBucket == false) { - return asMultiBucketAggregator(this, context, parent); + return asMultiBucketAggregator(this, searchContext, parent); } BucketCountThresholds bucketCountThresholds = new BucketCountThresholds(this.bucketCountThresholds); if (InternalOrder.isKeyOrder(order) == false @@ -133,7 +140,7 @@ public class TermsAggregatorFactory extends ValuesSourceAggregatorFactory config, - AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException { - return new AvgAggregatorFactory(name, config, context, parent, subFactoriesBuilder, metaData); + protected AvgAggregatorFactory innerBuild(QueryShardContext queryShardContext, ValuesSourceConfig config, + AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException { + return new AvgAggregatorFactory(name, config, queryShardContext, parent, subFactoriesBuilder, metaData); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AvgAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AvgAggregatorFactory.java index 2b1c53535c9..39a6c40aace 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AvgAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AvgAggregatorFactory.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.aggregations.metrics; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; @@ -35,20 +36,26 @@ import java.util.Map; class AvgAggregatorFactory extends ValuesSourceAggregatorFactory { - AvgAggregatorFactory(String name, ValuesSourceConfig config, SearchContext context, + AvgAggregatorFactory(String name, ValuesSourceConfig config, QueryShardContext queryShardContext, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { - super(name, config, context, parent, 
subFactoriesBuilder, metaData); + super(name, config, queryShardContext, parent, subFactoriesBuilder, metaData); } @Override - protected Aggregator createUnmapped(Aggregator parent, List pipelineAggregators, Map metaData) - throws IOException { - return new AvgAggregator(name, null, config.format(), context, parent, pipelineAggregators, metaData); + protected Aggregator createUnmapped(SearchContext searchContext, + Aggregator parent, + List pipelineAggregators, + Map metaData) throws IOException { + return new AvgAggregator(name, null, config.format(), searchContext, parent, pipelineAggregators, metaData); } @Override - protected Aggregator doCreateInternal(ValuesSource.Numeric valuesSource, Aggregator parent, boolean collectsFromSingleBucket, - List pipelineAggregators, Map metaData) throws IOException { - return new AvgAggregator(name, valuesSource, config.format(), context, parent, pipelineAggregators, metaData); + protected Aggregator doCreateInternal(Numeric valuesSource, + SearchContext searchContext, + Aggregator parent, + boolean collectsFromSingleBucket, + List pipelineAggregators, + Map metaData) throws IOException { + return new AvgAggregator(name, valuesSource, config.format(), searchContext, parent, pipelineAggregators, metaData); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregationBuilder.java index 0cc2b7d09c0..587476bb132 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregationBuilder.java @@ -26,6 +26,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; @@ -35,7 +36,6 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuil import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import org.elasticsearch.search.aggregations.support.ValuesSourceParserHelper; import org.elasticsearch.search.aggregations.support.ValuesSourceType; -import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; import java.util.Map; @@ -124,9 +124,9 @@ public final class CardinalityAggregationBuilder } @Override - protected CardinalityAggregatorFactory innerBuild(SearchContext context, ValuesSourceConfig config, - AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException { - return new CardinalityAggregatorFactory(name, config, precisionThreshold, context, parent, subFactoriesBuilder, metaData); + protected CardinalityAggregatorFactory innerBuild(QueryShardContext queryShardContext, ValuesSourceConfig config, + AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException { + return new CardinalityAggregatorFactory(name, config, precisionThreshold, queryShardContext, parent, subFactoriesBuilder, metaData); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregatorFactory.java 
b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregatorFactory.java index 8927697b90f..4fa4a8dddac 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregatorFactory.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.aggregations.metrics; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; @@ -36,23 +37,32 @@ class CardinalityAggregatorFactory extends ValuesSourceAggregatorFactory config, Long precisionThreshold, - SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, - Map metaData) throws IOException { - super(name, config, context, parent, subFactoriesBuilder, metaData); + CardinalityAggregatorFactory(String name, ValuesSourceConfig config, + Long precisionThreshold, + QueryShardContext queryShardContext, + AggregatorFactory parent, + AggregatorFactories.Builder subFactoriesBuilder, + Map metaData) throws IOException { + super(name, config, queryShardContext, parent, subFactoriesBuilder, metaData); this.precisionThreshold = precisionThreshold; } @Override - protected Aggregator createUnmapped(Aggregator parent, List pipelineAggregators, Map metaData) - throws IOException { - return new CardinalityAggregator(name, null, precision(), context, parent, pipelineAggregators, metaData); + protected Aggregator createUnmapped(SearchContext searchContext, + Aggregator parent, + List pipelineAggregators, + Map metaData) throws IOException { + return new CardinalityAggregator(name, null, precision(), searchContext, parent, pipelineAggregators, metaData); } @Override - protected Aggregator doCreateInternal(ValuesSource valuesSource, Aggregator parent, boolean collectsFromSingleBucket, - List pipelineAggregators, Map metaData) throws IOException { - return new CardinalityAggregator(name, valuesSource, precision(), context, parent, pipelineAggregators, + protected Aggregator doCreateInternal(ValuesSource valuesSource, + SearchContext searchContext, + Aggregator parent, + boolean collectsFromSingleBucket, + List pipelineAggregators, + Map metaData) throws IOException { + return new CardinalityAggregator(name, valuesSource, precision(), searchContext, parent, pipelineAggregators, metaData); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsAggregationBuilder.java index af9e8bbca8d..e1a7d52bd44 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsAggregationBuilder.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; @@ -34,7 +35,6 @@ import 
org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuil import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import org.elasticsearch.search.aggregations.support.ValuesSourceParserHelper; import org.elasticsearch.search.aggregations.support.ValuesSourceType; -import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; import java.util.Map; @@ -98,9 +98,9 @@ public class ExtendedStatsAggregationBuilder } @Override - protected ExtendedStatsAggregatorFactory innerBuild(SearchContext context, ValuesSourceConfig config, - AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException { - return new ExtendedStatsAggregatorFactory(name, config, sigma, context, parent, subFactoriesBuilder, metaData); + protected ExtendedStatsAggregatorFactory innerBuild(QueryShardContext queryShardContext, ValuesSourceConfig config, + AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException { + return new ExtendedStatsAggregatorFactory(name, config, sigma, queryShardContext, parent, subFactoriesBuilder, metaData); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsAggregatorFactory.java index 31e38a3bfd1..527c6093aa8 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsAggregatorFactory.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.aggregations.metrics; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; @@ -37,22 +38,34 @@ class ExtendedStatsAggregatorFactory extends ValuesSourceAggregatorFactory config, double sigma, - SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, - Map metaData) throws IOException { - super(name, config, context, parent, subFactoriesBuilder, metaData); + ExtendedStatsAggregatorFactory(String name, + ValuesSourceConfig config, + double sigma, + QueryShardContext queryShardContext, + AggregatorFactory parent, + AggregatorFactories.Builder subFactoriesBuilder, + Map metaData) throws IOException { + super(name, config, queryShardContext, parent, subFactoriesBuilder, metaData); this.sigma = sigma; } @Override - protected Aggregator createUnmapped(Aggregator parent, List pipelineAggregators, Map metaData) - throws IOException { - return new ExtendedStatsAggregator(name, null, config.format(), context, parent, sigma, pipelineAggregators, metaData); + protected Aggregator createUnmapped(SearchContext searchContext, + Aggregator parent, + List pipelineAggregators, + Map metaData) throws IOException { + return new ExtendedStatsAggregator(name, null, config.format(), searchContext, + parent, sigma, pipelineAggregators, metaData); } @Override - protected Aggregator doCreateInternal(ValuesSource.Numeric valuesSource, Aggregator parent, boolean collectsFromSingleBucket, - List pipelineAggregators, Map metaData) throws IOException { - return new ExtendedStatsAggregator(name, valuesSource, config.format(), context, parent, sigma, pipelineAggregators, metaData); + protected Aggregator doCreateInternal(Numeric valuesSource, + SearchContext searchContext, + Aggregator 
parent, + boolean collectsFromSingleBucket, + List pipelineAggregators, + Map metaData) throws IOException { + return new ExtendedStatsAggregator(name, valuesSource, config.format(), searchContext, + parent, sigma, pipelineAggregators, metaData); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsAggregationBuilder.java index 6f6101fc45e..89605512018 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsAggregationBuilder.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; @@ -33,7 +34,6 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuil import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import org.elasticsearch.search.aggregations.support.ValuesSourceParserHelper; import org.elasticsearch.search.aggregations.support.ValuesSourceType; -import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; import java.util.Map; @@ -98,9 +98,9 @@ public class GeoBoundsAggregationBuilder extends ValuesSourceAggregationBuilder< } @Override - protected GeoBoundsAggregatorFactory innerBuild(SearchContext context, ValuesSourceConfig config, - AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException { - return new GeoBoundsAggregatorFactory(name, config, wrapLongitude, context, parent, subFactoriesBuilder, metaData); + protected GeoBoundsAggregatorFactory innerBuild(QueryShardContext queryShardContext, ValuesSourceConfig config, + AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException { + return new GeoBoundsAggregatorFactory(name, config, wrapLongitude, queryShardContext, parent, subFactoriesBuilder, metaData); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsAggregatorFactory.java index de8936079c2..462aff381d3 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsAggregatorFactory.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.aggregations.metrics; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; @@ -36,22 +37,32 @@ class GeoBoundsAggregatorFactory extends ValuesSourceAggregatorFactory config, boolean wrapLongitude, - SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, - Map metaData) throws IOException { - super(name, config, context, parent, subFactoriesBuilder, metaData); + GeoBoundsAggregatorFactory(String name, + 
ValuesSourceConfig config, + boolean wrapLongitude, + QueryShardContext queryShardContext, + AggregatorFactory parent, + AggregatorFactories.Builder subFactoriesBuilder, + Map metaData) throws IOException { + super(name, config, queryShardContext, parent, subFactoriesBuilder, metaData); this.wrapLongitude = wrapLongitude; } @Override - protected Aggregator createUnmapped(Aggregator parent, List pipelineAggregators, Map metaData) - throws IOException { - return new GeoBoundsAggregator(name, context, parent, null, wrapLongitude, pipelineAggregators, metaData); + protected Aggregator createUnmapped(SearchContext searchContext, + Aggregator parent, + List pipelineAggregators, + Map metaData) throws IOException { + return new GeoBoundsAggregator(name, searchContext, parent, null, wrapLongitude, pipelineAggregators, metaData); } @Override - protected Aggregator doCreateInternal(ValuesSource.GeoPoint valuesSource, Aggregator parent, boolean collectsFromSingleBucket, - List pipelineAggregators, Map metaData) throws IOException { - return new GeoBoundsAggregator(name, context, parent, valuesSource, wrapLongitude, pipelineAggregators, metaData); + protected Aggregator doCreateInternal(ValuesSource.GeoPoint valuesSource, + SearchContext searchContext, + Aggregator parent, + boolean collectsFromSingleBucket, + List pipelineAggregators, + Map metaData) throws IOException { + return new GeoBoundsAggregator(name, searchContext, parent, valuesSource, wrapLongitude, pipelineAggregators, metaData); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/GeoCentroidAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/GeoCentroidAggregationBuilder.java index 98e8f2e9dbf..c40de7f7790 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/GeoCentroidAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/GeoCentroidAggregationBuilder.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; @@ -33,7 +34,6 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuil import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import org.elasticsearch.search.aggregations.support.ValuesSourceParserHelper; import org.elasticsearch.search.aggregations.support.ValuesSourceType; -import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; import java.util.Map; @@ -78,9 +78,9 @@ public class GeoCentroidAggregationBuilder } @Override - protected GeoCentroidAggregatorFactory innerBuild(SearchContext context, ValuesSourceConfig config, - AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException { - return new GeoCentroidAggregatorFactory(name, config, context, parent, subFactoriesBuilder, metaData); + protected GeoCentroidAggregatorFactory innerBuild(QueryShardContext queryShardContext, ValuesSourceConfig config, + AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException { + return new GeoCentroidAggregatorFactory(name, config, queryShardContext, parent, 
subFactoriesBuilder, metaData); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/GeoCentroidAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/GeoCentroidAggregatorFactory.java index b12ce921b7d..73200f18d91 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/GeoCentroidAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/GeoCentroidAggregatorFactory.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.aggregations.metrics; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; @@ -34,22 +35,30 @@ import java.util.Map; class GeoCentroidAggregatorFactory extends ValuesSourceAggregatorFactory { - GeoCentroidAggregatorFactory(String name, ValuesSourceConfig config, - SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, - Map metaData) throws IOException { - super(name, config, context, parent, subFactoriesBuilder, metaData); + GeoCentroidAggregatorFactory(String name, + ValuesSourceConfig config, + QueryShardContext queryShardContext, + AggregatorFactory parent, + AggregatorFactories.Builder subFactoriesBuilder, + Map metaData) throws IOException { + super(name, config, queryShardContext, parent, subFactoriesBuilder, metaData); } @Override - protected Aggregator createUnmapped(Aggregator parent, - List pipelineAggregators, Map metaData) throws IOException { - return new GeoCentroidAggregator(name, context, parent, null, pipelineAggregators, metaData); + protected Aggregator createUnmapped(SearchContext searchContext, + Aggregator parent, + List pipelineAggregators, + Map metaData) throws IOException { + return new GeoCentroidAggregator(name, searchContext, parent, null, pipelineAggregators, metaData); } @Override - protected Aggregator doCreateInternal(ValuesSource.GeoPoint valuesSource, Aggregator parent, - boolean collectsFromSingleBucket, List pipelineAggregators, Map metaData) - throws IOException { - return new GeoCentroidAggregator(name, context, parent, valuesSource, pipelineAggregators, metaData); + protected Aggregator doCreateInternal(ValuesSource.GeoPoint valuesSource, + SearchContext searchContext, + Aggregator parent, + boolean collectsFromSingleBucket, + List pipelineAggregators, + Map metaData) throws IOException { + return new GeoCentroidAggregator(name, searchContext, parent, valuesSource, pipelineAggregators, metaData); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksAggregatorFactory.java index cfea50bc312..dd191e8c457 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksAggregatorFactory.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.aggregations.metrics; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; @@ -41,25 +42,32 @@ class HDRPercentileRanksAggregatorFactory private 
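On the builder side (ExtendedStats, GeoBounds and GeoCentroid above), the only change is the innerBuild signature: it now receives the QueryShardContext and forwards it to the factory, so building an aggregation no longer requires a per-request SearchContext. A hedged sketch with illustrative names, generics elided as in the diff:

// Illustrative fragment: "ExampleAggregationBuilder" / "ExampleAggregatorFactory" are hypothetical;
// the innerBuild signature mirrors the builder hunks above.
public class ExampleAggregationBuilder extends ValuesSourceAggregationBuilder.LeafOnly {

    @Override
    protected ExampleAggregatorFactory innerBuild(QueryShardContext queryShardContext,
                                                  ValuesSourceConfig config,
                                                  AggregatorFactory parent,
                                                  Builder subFactoriesBuilder) throws IOException {
        // Everything needed at build time comes from the shard-level QueryShardContext,
        // which is why org.elasticsearch.search.internal.SearchContext is dropped from the imports.
        return new ExampleAggregatorFactory(name, config, queryShardContext, parent, subFactoriesBuilder, metaData);
    }
}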
final boolean keyed; HDRPercentileRanksAggregatorFactory(String name, ValuesSourceConfig config, double[] values, - int numberOfSignificantValueDigits, boolean keyed, SearchContext context, AggregatorFactory parent, - AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { - super(name, config, context, parent, subFactoriesBuilder, metaData); + int numberOfSignificantValueDigits, boolean keyed, QueryShardContext queryShardContext, + AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, + Map metaData) throws IOException { + super(name, config, queryShardContext, parent, subFactoriesBuilder, metaData); this.values = values; this.numberOfSignificantValueDigits = numberOfSignificantValueDigits; this.keyed = keyed; } @Override - protected Aggregator createUnmapped(Aggregator parent, List pipelineAggregators, Map metaData) - throws IOException { - return new HDRPercentileRanksAggregator(name, null, context, parent, values, numberOfSignificantValueDigits, keyed, + protected Aggregator createUnmapped(SearchContext searchContext, + Aggregator parent, + List pipelineAggregators, + Map metaData) throws IOException { + return new HDRPercentileRanksAggregator(name, null, searchContext, parent, values, numberOfSignificantValueDigits, keyed, config.format(), pipelineAggregators, metaData); } @Override - protected Aggregator doCreateInternal(Numeric valuesSource, Aggregator parent, boolean collectsFromSingleBucket, - List pipelineAggregators, Map metaData) throws IOException { - return new HDRPercentileRanksAggregator(name, valuesSource, context, parent, values, numberOfSignificantValueDigits, keyed, + protected Aggregator doCreateInternal(Numeric valuesSource, + SearchContext searchContext, + Aggregator parent, + boolean collectsFromSingleBucket, + List pipelineAggregators, + Map metaData) throws IOException { + return new HDRPercentileRanksAggregator(name, valuesSource, searchContext, parent, values, numberOfSignificantValueDigits, keyed, config.format(), pipelineAggregators, metaData); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesAggregatorFactory.java index 584ff30f53d..de5af206c53 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesAggregatorFactory.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.aggregations.metrics; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; @@ -39,26 +40,39 @@ class HDRPercentilesAggregatorFactory extends ValuesSourceAggregatorFactory config, double[] percents, - int numberOfSignificantValueDigits, boolean keyed, SearchContext context, AggregatorFactory parent, - AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { - super(name, config, context, parent, subFactoriesBuilder, metaData); + HDRPercentilesAggregatorFactory(String name, + ValuesSourceConfig config, + double[] percents, + int numberOfSignificantValueDigits, + boolean keyed, + QueryShardContext queryShardContext, + AggregatorFactory parent, + AggregatorFactories.Builder subFactoriesBuilder, + Map metaData) throws IOException { + 
super(name, config, queryShardContext, parent, subFactoriesBuilder, metaData); this.percents = percents; this.numberOfSignificantValueDigits = numberOfSignificantValueDigits; this.keyed = keyed; } @Override - protected Aggregator createUnmapped(Aggregator parent, List pipelineAggregators, Map metaData) + protected Aggregator createUnmapped(SearchContext searchContext, + Aggregator parent, + List pipelineAggregators, + Map metaData) throws IOException { - return new HDRPercentilesAggregator(name, null, context, parent, percents, numberOfSignificantValueDigits, keyed, + return new HDRPercentilesAggregator(name, null, searchContext, parent, percents, numberOfSignificantValueDigits, keyed, config.format(), pipelineAggregators, metaData); } @Override - protected Aggregator doCreateInternal(Numeric valuesSource, Aggregator parent, boolean collectsFromSingleBucket, - List pipelineAggregators, Map metaData) throws IOException { - return new HDRPercentilesAggregator(name, valuesSource, context, parent, percents, numberOfSignificantValueDigits, keyed, + protected Aggregator doCreateInternal(Numeric valuesSource, + SearchContext searchContext, + Aggregator parent, + boolean collectsFromSingleBucket, + List pipelineAggregators, + Map metaData) throws IOException { + return new HDRPercentilesAggregator(name, valuesSource, searchContext, parent, percents, numberOfSignificantValueDigits, keyed, config.format(), pipelineAggregators, metaData); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MaxAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MaxAggregationBuilder.java index b5d788cfc85..e8412a6b40e 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MaxAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MaxAggregationBuilder.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; @@ -34,7 +35,6 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuil import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import org.elasticsearch.search.aggregations.support.ValuesSourceParserHelper; import org.elasticsearch.search.aggregations.support.ValuesSourceType; -import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; import java.util.Map; @@ -78,9 +78,9 @@ public class MaxAggregationBuilder extends ValuesSourceAggregationBuilder.LeafOn } @Override - protected MaxAggregatorFactory innerBuild(SearchContext context, ValuesSourceConfig config, - AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException { - return new MaxAggregatorFactory(name, config, context, parent, subFactoriesBuilder, metaData); + protected MaxAggregatorFactory innerBuild(QueryShardContext queryShardContext, ValuesSourceConfig config, + AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException { + return new MaxAggregatorFactory(name, config, queryShardContext, parent, subFactoriesBuilder, metaData); } @Override diff --git 
a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MaxAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MaxAggregatorFactory.java index 2ae76239681..b345cca7dc6 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MaxAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MaxAggregatorFactory.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.aggregations.metrics; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; @@ -35,21 +36,26 @@ import java.util.Map; class MaxAggregatorFactory extends ValuesSourceAggregatorFactory { - MaxAggregatorFactory(String name, ValuesSourceConfig config, SearchContext context, + MaxAggregatorFactory(String name, ValuesSourceConfig config, QueryShardContext queryShardContext, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { - super(name, config, context, parent, subFactoriesBuilder, metaData); + super(name, config, queryShardContext, parent, subFactoriesBuilder, metaData); } @Override - protected Aggregator createUnmapped(Aggregator parent, - List pipelineAggregators, Map metaData) throws IOException { - return new MaxAggregator(name, config, null, context, parent, pipelineAggregators, metaData); + protected Aggregator createUnmapped(SearchContext searchContext, + Aggregator parent, + List pipelineAggregators, + Map metaData) throws IOException { + return new MaxAggregator(name, config, null, searchContext, parent, pipelineAggregators, metaData); } @Override - protected Aggregator doCreateInternal(ValuesSource.Numeric valuesSource, Aggregator parent, - boolean collectsFromSingleBucket, List pipelineAggregators, Map metaData) - throws IOException { - return new MaxAggregator(name, config, valuesSource, context, parent, pipelineAggregators, metaData); + protected Aggregator doCreateInternal(Numeric valuesSource, + SearchContext searchContext, + Aggregator parent, + boolean collectsFromSingleBucket, + List pipelineAggregators, + Map metaData) throws IOException { + return new MaxAggregator(name, config, valuesSource, searchContext, parent, pipelineAggregators, metaData); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregationBuilder.java index 6218fd3e52b..dae78275f50 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregationBuilder.java @@ -25,6 +25,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; @@ -35,7 +36,6 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFacto import 
org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import org.elasticsearch.search.aggregations.support.ValuesSourceParserHelper; import org.elasticsearch.search.aggregations.support.ValuesSourceType; -import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; import java.util.Map; @@ -107,13 +107,13 @@ public class MedianAbsoluteDeviationAggregationBuilder extends LeafOnly innerBuild(SearchContext context, - ValuesSourceConfig config, - AggregatorFactory parent, - AggregatorFactories.Builder subFactoriesBuilder) - throws IOException { - - return new MedianAbsoluteDeviationAggregatorFactory(name, config, context, parent, subFactoriesBuilder, metaData, compression); + protected ValuesSourceAggregatorFactory innerBuild(QueryShardContext queryShardContext, + ValuesSourceConfig config, + AggregatorFactory parent, + AggregatorFactories.Builder subFactoriesBuilder) + throws IOException { + return new MedianAbsoluteDeviationAggregatorFactory(name, config, queryShardContext, + parent, subFactoriesBuilder, metaData, compression); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregatorFactory.java index 037e743ad32..1f26b634cc2 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregatorFactory.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.aggregations.metrics; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; @@ -38,24 +39,25 @@ public class MedianAbsoluteDeviationAggregatorFactory extends ValuesSourceAggreg MedianAbsoluteDeviationAggregatorFactory(String name, ValuesSourceConfig config, - SearchContext context, + QueryShardContext queryShardContext, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData, double compression) throws IOException { - super(name, config, context, parent, subFactoriesBuilder, metaData); + super(name, config, queryShardContext, parent, subFactoriesBuilder, metaData); this.compression = compression; } @Override - protected Aggregator createUnmapped(Aggregator parent, - List pipelineAggregators, - Map metaData) throws IOException { + protected Aggregator createUnmapped(SearchContext searchContext, + Aggregator parent, + List pipelineAggregators, + Map metaData) throws IOException { return new MedianAbsoluteDeviationAggregator( name, - context, + searchContext, parent, pipelineAggregators, metaData, @@ -67,14 +69,15 @@ public class MedianAbsoluteDeviationAggregatorFactory extends ValuesSourceAggreg @Override protected Aggregator doCreateInternal(ValuesSource.Numeric valuesSource, - Aggregator parent, - boolean collectsFromSingleBucket, - List pipelineAggregators, - Map metaData) throws IOException { + SearchContext searchContext, + Aggregator parent, + boolean collectsFromSingleBucket, + List pipelineAggregators, + Map metaData) throws IOException { return new MedianAbsoluteDeviationAggregator( name, - context, + searchContext, parent, pipelineAggregators, metaData, diff --git 
a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MinAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MinAggregationBuilder.java index f195e10c65e..61222285b8e 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MinAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MinAggregationBuilder.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; @@ -34,7 +35,6 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuil import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import org.elasticsearch.search.aggregations.support.ValuesSourceParserHelper; import org.elasticsearch.search.aggregations.support.ValuesSourceType; -import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; import java.util.Map; @@ -78,9 +78,9 @@ public class MinAggregationBuilder extends ValuesSourceAggregationBuilder.LeafOn } @Override - protected MinAggregatorFactory innerBuild(SearchContext context, ValuesSourceConfig config, - AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException { - return new MinAggregatorFactory(name, config, context, parent, subFactoriesBuilder, metaData); + protected MinAggregatorFactory innerBuild(QueryShardContext queryShardContext, ValuesSourceConfig config, + AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException { + return new MinAggregatorFactory(name, config, queryShardContext, parent, subFactoriesBuilder, metaData); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MinAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MinAggregatorFactory.java index 23e55627bab..753c7643c20 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MinAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MinAggregatorFactory.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.aggregations.metrics; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; @@ -35,20 +36,26 @@ import java.util.Map; class MinAggregatorFactory extends ValuesSourceAggregatorFactory { - MinAggregatorFactory(String name, ValuesSourceConfig config, SearchContext context, + MinAggregatorFactory(String name, ValuesSourceConfig config, QueryShardContext queryShardContext, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { - super(name, config, context, parent, subFactoriesBuilder, metaData); + super(name, config, queryShardContext, parent, subFactoriesBuilder, metaData); } @Override - protected Aggregator createUnmapped(Aggregator parent, List pipelineAggregators, Map metaData) - throws IOException { - return new MinAggregator(name, config, null, context, parent, 
pipelineAggregators, metaData); + protected Aggregator createUnmapped(SearchContext searchContext, + Aggregator parent, + List pipelineAggregators, + Map metaData) throws IOException { + return new MinAggregator(name, config, null, searchContext, parent, pipelineAggregators, metaData); } @Override - protected Aggregator doCreateInternal(ValuesSource.Numeric valuesSource, Aggregator parent, boolean collectsFromSingleBucket, - List pipelineAggregators, Map metaData) throws IOException { - return new MinAggregator(name, config, valuesSource, context, parent, pipelineAggregators, metaData); + protected Aggregator doCreateInternal(Numeric valuesSource, + SearchContext searchContext, + Aggregator parent, + boolean collectsFromSingleBucket, + List pipelineAggregators, + Map metaData) throws IOException { + return new MinAggregator(name, config, valuesSource, searchContext, parent, pipelineAggregators, metaData); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/PercentileRanksAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/PercentileRanksAggregationBuilder.java index d4a600b11b5..d1a04667c6b 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/PercentileRanksAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/PercentileRanksAggregationBuilder.java @@ -26,6 +26,7 @@ import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; @@ -37,7 +38,6 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFacto import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import org.elasticsearch.search.aggregations.support.ValuesSourceParserHelper; import org.elasticsearch.search.aggregations.support.ValuesSourceType; -import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; import java.util.Arrays; @@ -240,14 +240,14 @@ public class PercentileRanksAggregationBuilder extends LeafOnly innerBuild(SearchContext context, ValuesSourceConfig config, - AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException { + protected ValuesSourceAggregatorFactory innerBuild(QueryShardContext queryShardContext, ValuesSourceConfig config, + AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException { switch (method) { case TDIGEST: - return new TDigestPercentileRanksAggregatorFactory(name, config, values, compression, keyed, context, parent, + return new TDigestPercentileRanksAggregatorFactory(name, config, values, compression, keyed, queryShardContext, parent, subFactoriesBuilder, metaData); case HDR: - return new HDRPercentileRanksAggregatorFactory(name, config, values, numberOfSignificantValueDigits, keyed, context, + return new HDRPercentileRanksAggregatorFactory(name, config, values, numberOfSignificantValueDigits, keyed, queryShardContext, parent, subFactoriesBuilder, metaData); default: throw new IllegalStateException("Illegal method [" + method + "]"); diff --git 
a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/PercentilesAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/PercentilesAggregationBuilder.java index 302e05a600a..5b1da34accc 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/PercentilesAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/PercentilesAggregationBuilder.java @@ -25,6 +25,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; @@ -36,7 +37,6 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFacto import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import org.elasticsearch.search.aggregations.support.ValuesSourceParserHelper; import org.elasticsearch.search.aggregations.support.ValuesSourceType; -import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; import java.util.Arrays; @@ -263,15 +263,17 @@ public class PercentilesAggregationBuilder extends LeafOnly innerBuild(SearchContext context, ValuesSourceConfig config, - AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException { + protected ValuesSourceAggregatorFactory innerBuild(QueryShardContext queryShardContext, + ValuesSourceConfig config, + AggregatorFactory parent, + Builder subFactoriesBuilder) throws IOException { switch (method) { case TDIGEST: - return new TDigestPercentilesAggregatorFactory(name, config, percents, compression, keyed, context, parent, + return new TDigestPercentilesAggregatorFactory(name, config, percents, compression, keyed, queryShardContext, parent, subFactoriesBuilder, metaData); case HDR: - return new HDRPercentilesAggregatorFactory(name, config, percents, numberOfSignificantValueDigits, keyed, context, parent, - subFactoriesBuilder, metaData); + return new HDRPercentilesAggregatorFactory(name, config, percents, + numberOfSignificantValueDigits, keyed, queryShardContext, parent, subFactoriesBuilder, metaData); default: throw new IllegalStateException("Illegal method [" + method + "]"); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregationBuilder.java index 0944ade85f1..e650a968036 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregationBuilder.java @@ -32,7 +32,6 @@ import org.elasticsearch.search.aggregations.AbstractAggregationBuilder; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; -import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; import java.util.Collections; @@ -193,8 +192,8 @@ public class ScriptedMetricAggregationBuilder extends AbstractAggregationBuilder } @Override - protected 
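Both percentile builders keep their TDIGEST/HDR dispatch; the only change is that they now thread the QueryShardContext through to whichever factory the method selects. A fragment paraphrasing the PercentilesAggregationBuilder.innerBuild hunk above, with the field names taken from the diff:

// Dispatch inside innerBuild(QueryShardContext queryShardContext, ...); generics elided as in the diff.
switch (method) {
    case TDIGEST:
        return new TDigestPercentilesAggregatorFactory(name, config, percents, compression, keyed,
            queryShardContext, parent, subFactoriesBuilder, metaData);
    case HDR:
        return new HDRPercentilesAggregatorFactory(name, config, percents, numberOfSignificantValueDigits, keyed,
            queryShardContext, parent, subFactoriesBuilder, metaData);
    default:
        throw new IllegalStateException("Illegal method [" + method + "]");
}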
ScriptedMetricAggregatorFactory doBuild(SearchContext context, AggregatorFactory parent, - Builder subfactoriesBuilder) throws IOException { + protected ScriptedMetricAggregatorFactory doBuild(QueryShardContext queryShardContext, AggregatorFactory parent, + Builder subfactoriesBuilder) throws IOException { if (combineScript == null) { throw new IllegalArgumentException("[combineScript] must not be null: [" + name + "]"); @@ -204,8 +203,6 @@ public class ScriptedMetricAggregationBuilder extends AbstractAggregationBuilder throw new IllegalArgumentException("[reduceScript] must not be null: [" + name + "]"); } - QueryShardContext queryShardContext = context.getQueryShardContext(); - // Extract params from scripts and pass them along to ScriptedMetricAggregatorFactory, since it won't have // access to them for the scripts it's given precompiled. @@ -233,7 +230,7 @@ public class ScriptedMetricAggregationBuilder extends AbstractAggregationBuilder return new ScriptedMetricAggregatorFactory(name, compiledMapScript, mapScriptParams, compiledInitScript, initScriptParams, compiledCombineScript, combineScriptParams, reduceScript, - params, queryShardContext.lookup(), context, parent, subfactoriesBuilder, metaData); + params, queryShardContext.lookup(), queryShardContext, parent, subfactoriesBuilder, metaData); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregatorFactory.java index 5c0ceeb1043..2c4d73fc752 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregatorFactory.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.aggregations.metrics; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.script.ScriptedMetricAggContexts; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.script.Script; @@ -53,9 +54,9 @@ class ScriptedMetricAggregatorFactory extends AggregatorFactory { ScriptedMetricAggContexts.InitScript.Factory initScript, Map initScriptParams, ScriptedMetricAggContexts.CombineScript.Factory combineScript, Map combineScriptParams, Script reduceScript, Map aggParams, - SearchLookup lookup, SearchContext context, AggregatorFactory parent, + SearchLookup lookup, QueryShardContext queryShardContext, AggregatorFactory parent, AggregatorFactories.Builder subFactories, Map metaData) throws IOException { - super(name, context, parent, subFactories, metaData); + super(name, queryShardContext, parent, subFactories, metaData); this.mapScript = mapScript; this.mapScriptParams = mapScriptParams; this.initScript = initScript; @@ -68,14 +69,17 @@ class ScriptedMetricAggregatorFactory extends AggregatorFactory { } @Override - public Aggregator createInternal(Aggregator parent, boolean collectsFromSingleBucket, List pipelineAggregators, - Map metaData) throws IOException { + public Aggregator createInternal(SearchContext searchContext, + Aggregator parent, + boolean collectsFromSingleBucket, + List pipelineAggregators, + Map metaData) throws IOException { if (collectsFromSingleBucket == false) { - return asMultiBucketAggregator(this, context, parent); + return asMultiBucketAggregator(this, searchContext, parent); } Map aggParams = this.aggParams; if (aggParams != null) { - aggParams = deepCopyParams(aggParams, context); + aggParams = 
deepCopyParams(aggParams, searchContext); } else { aggParams = new HashMap<>(); } @@ -89,13 +93,13 @@ class ScriptedMetricAggregatorFactory extends AggregatorFactory { final ScriptedMetricAggContexts.CombineScript combineScript = this.combineScript.newInstance( mergeParams(aggParams, combineScriptParams), aggState); - final Script reduceScript = deepCopyScript(this.reduceScript, context, aggParams); + final Script reduceScript = deepCopyScript(this.reduceScript, searchContext, aggParams); if (initScript != null) { initScript.execute(); CollectionUtils.ensureNoSelfReferences(aggState, "Scripted metric aggs init script"); } return new ScriptedMetricAggregator(name, mapScript, - combineScript, reduceScript, aggState, context, parent, + combineScript, reduceScript, aggState, searchContext, parent, pipelineAggregators, metaData); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/StatsAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/StatsAggregationBuilder.java index 34e5ab28641..c1e51a6bdcd 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/StatsAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/StatsAggregationBuilder.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; @@ -34,7 +35,6 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuil import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import org.elasticsearch.search.aggregations.support.ValuesSourceParserHelper; import org.elasticsearch.search.aggregations.support.ValuesSourceType; -import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; import java.util.Map; @@ -79,9 +79,9 @@ public class StatsAggregationBuilder extends ValuesSourceAggregationBuilder.Leaf } @Override - protected StatsAggregatorFactory innerBuild(SearchContext context, ValuesSourceConfig config, - AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException { - return new StatsAggregatorFactory(name, config, context, parent, subFactoriesBuilder, metaData); + protected StatsAggregatorFactory innerBuild(QueryShardContext queryShardContext, ValuesSourceConfig config, + AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException { + return new StatsAggregatorFactory(name, config, queryShardContext, parent, subFactoriesBuilder, metaData); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/StatsAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/StatsAggregatorFactory.java index bc0e8a44e77..6f714440072 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/StatsAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/StatsAggregatorFactory.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.aggregations.metrics; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.aggregations.Aggregator; import 
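For AggregatorFactory subclasses that are not values-source based (ScriptedMetric here, TopHits further down), the change lands in createInternal instead: the SearchContext is passed in and used for the multi-bucket wrapper, the params deep copy, and the aggregator itself. A condensed sketch of the ScriptedMetricAggregatorFactory.createInternal hunk above; the script-instantiation lines are elided, and the helper methods are the ones named in the diff:

// Condensed from the hunk above: asMultiBucketAggregator, deepCopyParams and deepCopyScript
// now receive the SearchContext argument instead of a captured context field.
@Override
public Aggregator createInternal(SearchContext searchContext,
                                 Aggregator parent,
                                 boolean collectsFromSingleBucket,
                                 List pipelineAggregators,
                                 Map metaData) throws IOException {
    if (collectsFromSingleBucket == false) {
        return asMultiBucketAggregator(this, searchContext, parent);
    }
    Map aggParams = this.aggParams != null ? deepCopyParams(this.aggParams, searchContext) : new HashMap<>();
    Script reduceScript = deepCopyScript(this.reduceScript, searchContext, aggParams);
    // ... init, map and combine scripts are instantiated exactly as before ...
    return new ScriptedMetricAggregator(name, mapScript, combineScript, reduceScript,
        aggState, searchContext, parent, pipelineAggregators, metaData);
}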
org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; @@ -35,20 +36,31 @@ import java.util.Map; class StatsAggregatorFactory extends ValuesSourceAggregatorFactory { - StatsAggregatorFactory(String name, ValuesSourceConfig config, SearchContext context, - AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { - super(name, config, context, parent, subFactoriesBuilder, metaData); + StatsAggregatorFactory(String name, + ValuesSourceConfig config, + QueryShardContext queryShardContext, + AggregatorFactory parent, + AggregatorFactories.Builder subFactoriesBuilder, + Map metaData) throws IOException { + super(name, config, queryShardContext, parent, subFactoriesBuilder, metaData); } @Override - protected Aggregator createUnmapped(Aggregator parent, List pipelineAggregators, Map metaData) + protected Aggregator createUnmapped(SearchContext searchContext, + Aggregator parent, + List pipelineAggregators, + Map metaData) throws IOException { - return new StatsAggregator(name, null, config.format(), context, parent, pipelineAggregators, metaData); + return new StatsAggregator(name, null, config.format(), searchContext, parent, pipelineAggregators, metaData); } @Override - protected Aggregator doCreateInternal(ValuesSource.Numeric valuesSource, Aggregator parent, boolean collectsFromSingleBucket, - List pipelineAggregators, Map metaData) throws IOException { - return new StatsAggregator(name, valuesSource, config.format(), context, parent, pipelineAggregators, metaData); + protected Aggregator doCreateInternal(Numeric valuesSource, + SearchContext searchContext, + Aggregator parent, + boolean collectsFromSingleBucket, + List pipelineAggregators, + Map metaData) throws IOException { + return new StatsAggregator(name, valuesSource, config.format(), searchContext, parent, pipelineAggregators, metaData); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/SumAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/SumAggregationBuilder.java index 9f320b09810..14530d450a0 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/SumAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/SumAggregationBuilder.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; @@ -34,7 +35,6 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuil import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import org.elasticsearch.search.aggregations.support.ValuesSourceParserHelper; import org.elasticsearch.search.aggregations.support.ValuesSourceType; -import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; import java.util.Map; @@ -78,9 +78,9 @@ public class SumAggregationBuilder extends ValuesSourceAggregationBuilder.LeafOn } @Override - protected SumAggregatorFactory innerBuild(SearchContext context, ValuesSourceConfig config, - AggregatorFactory parent, 
Builder subFactoriesBuilder) throws IOException { - return new SumAggregatorFactory(name, config, context, parent, subFactoriesBuilder, metaData); + protected SumAggregatorFactory innerBuild(QueryShardContext queryShardContext, ValuesSourceConfig config, + AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException { + return new SumAggregatorFactory(name, config, queryShardContext, parent, subFactoriesBuilder, metaData); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/SumAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/SumAggregatorFactory.java index a59e6dbf1e7..9e547344042 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/SumAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/SumAggregatorFactory.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.aggregations.metrics; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; @@ -35,20 +36,31 @@ import java.util.Map; class SumAggregatorFactory extends ValuesSourceAggregatorFactory { - SumAggregatorFactory(String name, ValuesSourceConfig config, SearchContext context, - AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { - super(name, config, context, parent, subFactoriesBuilder, metaData); + SumAggregatorFactory(String name, + ValuesSourceConfig config, + QueryShardContext queryShardContext, + AggregatorFactory parent, + AggregatorFactories.Builder subFactoriesBuilder, + Map metaData) throws IOException { + super(name, config, queryShardContext, parent, subFactoriesBuilder, metaData); } @Override - protected Aggregator createUnmapped(Aggregator parent, List pipelineAggregators, Map metaData) + protected Aggregator createUnmapped(SearchContext searchContext, + Aggregator parent, + List pipelineAggregators, + Map metaData) throws IOException { - return new SumAggregator(name, null, config.format(), context, parent, pipelineAggregators, metaData); + return new SumAggregator(name, null, config.format(), searchContext, parent, pipelineAggregators, metaData); } @Override - protected Aggregator doCreateInternal(ValuesSource.Numeric valuesSource, Aggregator parent, boolean collectsFromSingleBucket, - List pipelineAggregators, Map metaData) throws IOException { - return new SumAggregator(name, valuesSource, config.format(), context, parent, pipelineAggregators, metaData); + protected Aggregator doCreateInternal(Numeric valuesSource, + SearchContext searchContext, + Aggregator parent, + boolean collectsFromSingleBucket, + List pipelineAggregators, + Map metaData) throws IOException { + return new SumAggregator(name, valuesSource, config.format(), searchContext, parent, pipelineAggregators, metaData); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksAggregatorFactory.java index e4b3a722c7c..5138ff27416 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksAggregatorFactory.java @@ -19,6 +19,7 @@ 
package org.elasticsearch.search.aggregations.metrics; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; @@ -40,27 +41,39 @@ class TDigestPercentileRanksAggregatorFactory private final double compression; private final boolean keyed; - TDigestPercentileRanksAggregatorFactory(String name, ValuesSourceConfig config, double[] percents, - double compression, boolean keyed, SearchContext context, AggregatorFactory parent, - AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { - super(name, config, context, parent, subFactoriesBuilder, metaData); + TDigestPercentileRanksAggregatorFactory(String name, + ValuesSourceConfig config, + double[] percents, + double compression, + boolean keyed, + QueryShardContext queryShardContext, + AggregatorFactory parent, + AggregatorFactories.Builder subFactoriesBuilder, + Map metaData) throws IOException { + super(name, config, queryShardContext, parent, subFactoriesBuilder, metaData); this.percents = percents; this.compression = compression; this.keyed = keyed; } @Override - protected Aggregator createUnmapped(Aggregator parent, List pipelineAggregators, Map metaData) - throws IOException { - return new TDigestPercentileRanksAggregator(name, null, context, parent, percents, compression, keyed, config.format(), + protected Aggregator createUnmapped(SearchContext searchContext, + Aggregator parent, + List pipelineAggregators, + Map metaData) throws IOException { + return new TDigestPercentileRanksAggregator(name, null, searchContext, parent, percents, compression, keyed, config.format(), pipelineAggregators, metaData); } @Override - protected Aggregator doCreateInternal(Numeric valuesSource, Aggregator parent, boolean collectsFromSingleBucket, - List pipelineAggregators, Map metaData) throws IOException { - return new TDigestPercentileRanksAggregator(name, valuesSource, context, parent, percents, compression, keyed, config.format(), - pipelineAggregators, metaData); + protected Aggregator doCreateInternal(Numeric valuesSource, + SearchContext searchContext, + Aggregator parent, + boolean collectsFromSingleBucket, + List pipelineAggregators, + Map metaData) throws IOException { + return new TDigestPercentileRanksAggregator(name, valuesSource, searchContext, parent, + percents, compression, keyed, config.format(), pipelineAggregators, metaData); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesAggregatorFactory.java index eed8df50116..252a3b4ac38 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesAggregatorFactory.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.aggregations.metrics; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; @@ -41,25 +42,31 @@ class TDigestPercentilesAggregatorFactory private final boolean keyed; TDigestPercentilesAggregatorFactory(String name, ValuesSourceConfig config, double[] percents, - double 
compression, boolean keyed, SearchContext context, AggregatorFactory parent, - AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { - super(name, config, context, parent, subFactoriesBuilder, metaData); + double compression, boolean keyed, QueryShardContext queryShardContext, AggregatorFactory parent, + AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { + super(name, config, queryShardContext, parent, subFactoriesBuilder, metaData); this.percents = percents; this.compression = compression; this.keyed = keyed; } @Override - protected Aggregator createUnmapped(Aggregator parent, List pipelineAggregators, Map metaData) - throws IOException { - return new TDigestPercentilesAggregator(name, null, context, parent, percents, compression, keyed, config.format(), + protected Aggregator createUnmapped(SearchContext searchContext, + Aggregator parent, + List pipelineAggregators, + Map metaData) throws IOException { + return new TDigestPercentilesAggregator(name, null, searchContext, parent, percents, compression, keyed, config.format(), pipelineAggregators, metaData); } @Override - protected Aggregator doCreateInternal(Numeric valuesSource, Aggregator parent, boolean collectsFromSingleBucket, - List pipelineAggregators, Map metaData) throws IOException { - return new TDigestPercentilesAggregator(name, valuesSource, context, parent, percents, compression, keyed, config.format(), + protected Aggregator doCreateInternal(Numeric valuesSource, + SearchContext searchContext, + Aggregator parent, + boolean collectsFromSingleBucket, + List pipelineAggregators, + Map metaData) throws IOException { + return new TDigestPercentilesAggregator(name, valuesSource, searchContext, parent, percents, compression, keyed, config.format(), pipelineAggregators, metaData); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregationBuilder.java index 163d85682b2..67a95d23e91 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregationBuilder.java @@ -43,7 +43,6 @@ import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext.FieldAndFor import org.elasticsearch.search.fetch.subphase.FetchSourceContext; import org.elasticsearch.search.fetch.subphase.ScriptFieldsContext; import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder; -import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.sort.ScoreSortBuilder; import org.elasticsearch.search.sort.SortAndFormats; import org.elasticsearch.search.sort.SortBuilder; @@ -577,10 +576,10 @@ public class TopHitsAggregationBuilder extends AbstractAggregationBuilder maxInnerResultWindow) { throw new IllegalArgumentException( "Top hits result window is too large, the top hits aggregator [" + name + "]'s from + size must be less " + @@ -593,9 +592,8 @@ public class TopHitsAggregationBuilder extends AbstractAggregationBuilder fields = new ArrayList<>(); if (scriptFields != null) { for (ScriptField field : scriptFields) { - QueryShardContext shardContext = context.getQueryShardContext(); - FieldScript.Factory factory = shardContext.getScriptService().compile(field.script(), FieldScript.CONTEXT); - FieldScript.LeafFactory searchScript = factory.newFactory(field.script().getParams(), 
shardContext.lookup()); + FieldScript.Factory factory = queryShardContext.getScriptService().compile(field.script(), FieldScript.CONTEXT); + FieldScript.LeafFactory searchScript = factory.newFactory(field.script().getParams(), queryShardContext.lookup()); fields.add(new org.elasticsearch.search.fetch.subphase.ScriptFieldsContext.ScriptField( field.fieldName(), searchScript, field.ignoreFailure())); } @@ -605,11 +603,11 @@ public class TopHitsAggregationBuilder extends AbstractAggregationBuilder scriptFields; private final FetchSourceContext fetchSourceContext; - TopHitsAggregatorFactory(String name, int from, int size, boolean explain, boolean version, boolean seqNoAndPrimaryTerm, - boolean trackScores, Optional sort, HighlightBuilder highlightBuilder, StoredFieldsContext storedFieldsContext, - List docValueFields, List scriptFields, FetchSourceContext fetchSourceContext, - SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactories, Map metaData) - throws IOException { - super(name, context, parent, subFactories, metaData); + TopHitsAggregatorFactory(String name, + int from, + int size, + boolean explain, + boolean version, + boolean seqNoAndPrimaryTerm, + boolean trackScores, + Optional sort, + HighlightBuilder highlightBuilder, + StoredFieldsContext storedFieldsContext, + List docValueFields, + List scriptFields, + FetchSourceContext fetchSourceContext, + QueryShardContext queryShardContext, + AggregatorFactory parent, + AggregatorFactories.Builder subFactories, + Map metaData) throws IOException { + super(name, queryShardContext, parent, subFactories, metaData); this.from = from; this.size = size; this.explain = explain; @@ -74,10 +87,13 @@ class TopHitsAggregatorFactory extends AggregatorFactory { } @Override - public Aggregator createInternal(Aggregator parent, boolean collectsFromSingleBucket, List pipelineAggregators, - Map metaData) throws IOException { - SubSearchContext subSearchContext = new SubSearchContext(context); - subSearchContext.parsedQuery(context.parsedQuery()); + public Aggregator createInternal(SearchContext searchContext, + Aggregator parent, + boolean collectsFromSingleBucket, + List pipelineAggregators, + Map metaData) throws IOException { + SubSearchContext subSearchContext = new SubSearchContext(searchContext); + subSearchContext.parsedQuery(searchContext.parsedQuery()); subSearchContext.explain(explain); subSearchContext.version(version); subSearchContext.seqNoAndPrimaryTerm(seqNoAndPrimaryTerm); @@ -100,9 +116,9 @@ class TopHitsAggregatorFactory extends AggregatorFactory { subSearchContext.fetchSourceContext(fetchSourceContext); } if (highlightBuilder != null) { - subSearchContext.highlight(highlightBuilder.build(context.getQueryShardContext())); + subSearchContext.highlight(highlightBuilder.build(searchContext.getQueryShardContext())); } - return new TopHitsAggregator(context.fetchPhase(), subSearchContext, name, context, parent, + return new TopHitsAggregator(searchContext.fetchPhase(), subSearchContext, name, searchContext, parent, pipelineAggregators, metaData); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ValueCountAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ValueCountAggregationBuilder.java index 845fab414a3..eca8d1ead8c 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ValueCountAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ValueCountAggregationBuilder.java 
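TopHits is the one factory that still needs request-scoped state, so its createInternal builds the SubSearchContext from the SearchContext it now receives rather than from a captured field, and obtains the QueryShardContext for highlighting from that same SearchContext. Condensed from the hunk above; the per-hit options are the factory fields shown in the diff:

// Condensed from TopHitsAggregatorFactory.createInternal above; most option-copying lines elided.
@Override
public Aggregator createInternal(SearchContext searchContext,
                                 Aggregator parent,
                                 boolean collectsFromSingleBucket,
                                 List pipelineAggregators,
                                 Map metaData) throws IOException {
    SubSearchContext subSearchContext = new SubSearchContext(searchContext);
    subSearchContext.parsedQuery(searchContext.parsedQuery());
    subSearchContext.explain(explain);
    // ... version, seqNoAndPrimaryTerm, from/size, stored fields, doc-value fields, script fields,
    //     and fetch-source settings are copied onto the sub-context as before ...
    if (highlightBuilder != null) {
        subSearchContext.highlight(highlightBuilder.build(searchContext.getQueryShardContext()));
    }
    return new TopHitsAggregator(searchContext.fetchPhase(), subSearchContext, name, searchContext,
        parent, pipelineAggregators, metaData);
}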
@@ -25,6 +25,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; @@ -34,7 +35,6 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuil import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import org.elasticsearch.search.aggregations.support.ValuesSourceParserHelper; import org.elasticsearch.search.aggregations.support.ValuesSourceType; -import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; import java.util.Map; @@ -84,9 +84,11 @@ public class ValueCountAggregationBuilder extends ValuesSourceAggregationBuilder } @Override - protected ValueCountAggregatorFactory innerBuild(SearchContext context, ValuesSourceConfig config, - AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder) throws IOException { - return new ValueCountAggregatorFactory(name, config, context, parent, subFactoriesBuilder, metaData); + protected ValueCountAggregatorFactory innerBuild(QueryShardContext queryShardContext, + ValuesSourceConfig config, + AggregatorFactory parent, + AggregatorFactories.Builder subFactoriesBuilder) throws IOException { + return new ValueCountAggregatorFactory(name, config, queryShardContext, parent, subFactoriesBuilder, metaData); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ValueCountAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ValueCountAggregatorFactory.java index b149dc9a730..b00eaf6f278 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ValueCountAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ValueCountAggregatorFactory.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.aggregations.metrics; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; @@ -34,20 +35,26 @@ import java.util.Map; class ValueCountAggregatorFactory extends ValuesSourceAggregatorFactory { - ValueCountAggregatorFactory(String name, ValuesSourceConfig config, SearchContext context, + ValueCountAggregatorFactory(String name, ValuesSourceConfig config, QueryShardContext queryShardContext, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { - super(name, config, context, parent, subFactoriesBuilder, metaData); + super(name, config, queryShardContext, parent, subFactoriesBuilder, metaData); } @Override - protected Aggregator createUnmapped(Aggregator parent, List pipelineAggregators, Map metaData) - throws IOException { - return new ValueCountAggregator(name, null, context, parent, pipelineAggregators, metaData); + protected Aggregator createUnmapped(SearchContext searchContext, + Aggregator parent, + List pipelineAggregators, + Map metaData) throws IOException { + return new ValueCountAggregator(name, null, searchContext, parent, pipelineAggregators, metaData); } @Override - protected 
Aggregator doCreateInternal(ValuesSource valuesSource, Aggregator parent, boolean collectsFromSingleBucket, - List pipelineAggregators, Map metaData) throws IOException { - return new ValueCountAggregator(name, valuesSource, context, parent, pipelineAggregators, metaData); + protected Aggregator doCreateInternal(ValuesSource valuesSource, + SearchContext searchContext, + Aggregator parent, + boolean collectsFromSingleBucket, + List pipelineAggregators, + Map metaData) throws IOException { + return new ValueCountAggregator(name, valuesSource, searchContext, parent, pipelineAggregators, metaData); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/WeightedAvgAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/WeightedAvgAggregationBuilder.java index 4cc2ff97c4c..754f192dfd6 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/WeightedAvgAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/WeightedAvgAggregationBuilder.java @@ -26,6 +26,7 @@ import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; @@ -37,7 +38,6 @@ import org.elasticsearch.search.aggregations.support.MultiValuesSourceParseHelpe import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric; import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; -import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; import java.util.Map; @@ -98,12 +98,12 @@ public class WeightedAvgAggregationBuilder extends MultiValuesSourceAggregationB } @Override - protected MultiValuesSourceAggregatorFactory innerBuild(SearchContext context, - Map> configs, - DocValueFormat format, - AggregatorFactory parent, - Builder subFactoriesBuilder) throws IOException { - return new WeightedAvgAggregatorFactory(name, configs, format, context, parent, subFactoriesBuilder, metaData); + protected MultiValuesSourceAggregatorFactory innerBuild(QueryShardContext queryShardContext, + Map> configs, + DocValueFormat format, + AggregatorFactory parent, + Builder subFactoriesBuilder) throws IOException { + return new WeightedAvgAggregatorFactory(name, configs, format, queryShardContext, parent, subFactoriesBuilder, metaData); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/WeightedAvgAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/WeightedAvgAggregatorFactory.java index 0e57e000cf1..8bc8e4f85c4 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/WeightedAvgAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/WeightedAvgAggregatorFactory.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.aggregations.metrics; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; @@ -36,29 +37,34 @@ 
import java.util.Map; class WeightedAvgAggregatorFactory extends MultiValuesSourceAggregatorFactory { - WeightedAvgAggregatorFactory(String name, Map> configs, - DocValueFormat format, SearchContext context, AggregatorFactory parent, + WeightedAvgAggregatorFactory(String name, Map> configs, + DocValueFormat format, QueryShardContext queryShardContext, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { - super(name, configs, format, context, parent, subFactoriesBuilder, metaData); + super(name, configs, format, queryShardContext, parent, subFactoriesBuilder, metaData); } @Override - protected Aggregator createUnmapped(Aggregator parent, List pipelineAggregators, Map metaData) - throws IOException { - return new WeightedAvgAggregator(name, null, format, context, parent, pipelineAggregators, metaData); + protected Aggregator createUnmapped(SearchContext searchContext, + Aggregator parent, + List pipelineAggregators, + Map metaData) throws IOException { + return new WeightedAvgAggregator(name, null, format, searchContext, parent, pipelineAggregators, metaData); } @Override - protected Aggregator doCreateInternal(Map> configs, DocValueFormat format, - Aggregator parent, boolean collectsFromSingleBucket, - List pipelineAggregators, - Map metaData) throws IOException { + protected Aggregator doCreateInternal(SearchContext searchContext, + Map> configs, + DocValueFormat format, + Aggregator parent, + boolean collectsFromSingleBucket, + List pipelineAggregators, + Map metaData) throws IOException { MultiValuesSource.NumericMultiValuesSource numericMultiVS - = new MultiValuesSource.NumericMultiValuesSource(configs, context.getQueryShardContext()); + = new MultiValuesSource.NumericMultiValuesSource(configs, queryShardContext); if (numericMultiVS.areValuesSourcesEmpty()) { - return createUnmapped(parent, pipelineAggregators, metaData); + return createUnmapped(searchContext, parent, pipelineAggregators, metaData); } - return new WeightedAvgAggregator(name, numericMultiVS, format, context, parent, pipelineAggregators, metaData); + return new WeightedAvgAggregator(name, numericMultiVS, format, searchContext, parent, pipelineAggregators, metaData); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceAggregationBuilder.java index cd37d1a9969..efeed0c9efb 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceAggregationBuilder.java @@ -22,13 +22,12 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.AbstractAggregationBuilder; import org.elasticsearch.search.aggregations.AggregationInitializationException; -import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; -import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; import java.util.HashMap; @@ -164,18 
+163,18 @@ public abstract class MultiValuesSourceAggregationBuilder doBuild(SearchContext context, AggregatorFactory parent, - AggregatorFactories.Builder subFactoriesBuilder) throws IOException { + protected final MultiValuesSourceAggregatorFactory doBuild(QueryShardContext queryShardContext, AggregatorFactory parent, + Builder subFactoriesBuilder) throws IOException { ValueType finalValueType = this.valueType != null ? this.valueType : targetValueType; Map> configs = new HashMap<>(fields.size()); fields.forEach((key, value) -> { - ValuesSourceConfig config = ValuesSourceConfig.resolve(context.getQueryShardContext(), finalValueType, + ValuesSourceConfig config = ValuesSourceConfig.resolve(queryShardContext, finalValueType, value.getFieldName(), value.getScript(), value.getMissing(), value.getTimeZone(), format); configs.put(key, config); }); DocValueFormat docValueFormat = resolveFormat(format, finalValueType); - return innerBuild(context, configs, docValueFormat, parent, subFactoriesBuilder); + return innerBuild(queryShardContext, configs, docValueFormat, parent, subFactoriesBuilder); } @@ -190,9 +189,10 @@ public abstract class MultiValuesSourceAggregationBuilder innerBuild(SearchContext context, - Map> configs, DocValueFormat format, AggregatorFactory parent, - AggregatorFactories.Builder subFactoriesBuilder) throws IOException; + protected abstract MultiValuesSourceAggregatorFactory innerBuild(QueryShardContext queryShardContext, + Map> configs, + DocValueFormat format, AggregatorFactory parent, + Builder subFactoriesBuilder) throws IOException; /** diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceAggregatorFactory.java index 9516c9db875..a76f345071d 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceAggregatorFactory.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.aggregations.support; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; @@ -37,26 +38,31 @@ public abstract class MultiValuesSourceAggregatorFactory> configs, - DocValueFormat format, SearchContext context, + DocValueFormat format, QueryShardContext queryShardContext, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { - super(name, context, parent, subFactoriesBuilder, metaData); + super(name, queryShardContext, parent, subFactoriesBuilder, metaData); this.configs = configs; this.format = format; } @Override - public Aggregator createInternal(Aggregator parent, boolean collectsFromSingleBucket, List pipelineAggregators, - Map metaData) throws IOException { + public Aggregator createInternal(SearchContext searchContext, + Aggregator parent, + boolean collectsFromSingleBucket, + List pipelineAggregators, + Map metaData) throws IOException { - return doCreateInternal(configs, format, parent, collectsFromSingleBucket, - pipelineAggregators, metaData); + return doCreateInternal(searchContext, configs, format, parent, + collectsFromSingleBucket, pipelineAggregators, metaData); } - protected abstract Aggregator createUnmapped(Aggregator parent, List pipelineAggregators, - Map 
metaData) throws IOException; + protected abstract Aggregator createUnmapped(SearchContext searchContext, + Aggregator parent, + List pipelineAggregators, + Map metaData) throws IOException; - protected abstract Aggregator doCreateInternal(Map> configs, + protected abstract Aggregator doCreateInternal(SearchContext searchContext, Map> configs, DocValueFormat format, Aggregator parent, boolean collectsFromSingleBucket, List pipelineAggregators, Map metaData) throws IOException; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregationBuilder.java index ce0586366bc..14ef917a357 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregationBuilder.java @@ -23,13 +23,12 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.time.DateUtils; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.script.Script; import org.elasticsearch.search.aggregations.AbstractAggregationBuilder; import org.elasticsearch.search.aggregations.AggregationInitializationException; -import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; -import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; import java.time.ZoneId; @@ -317,10 +316,10 @@ public abstract class ValuesSourceAggregationBuilder doBuild(SearchContext context, AggregatorFactory parent, - AggregatorFactories.Builder subFactoriesBuilder) throws IOException { - ValuesSourceConfig config = resolveConfig(context); - ValuesSourceAggregatorFactory factory = innerBuild(context, config, parent, subFactoriesBuilder); + protected final ValuesSourceAggregatorFactory doBuild(QueryShardContext queryShardContext, AggregatorFactory parent, + Builder subFactoriesBuilder) throws IOException { + ValuesSourceConfig config = resolveConfig(queryShardContext); + ValuesSourceAggregatorFactory factory = innerBuild(queryShardContext, config, parent, subFactoriesBuilder); return factory; } @@ -345,14 +344,16 @@ public abstract class ValuesSourceAggregationBuilder resolveConfig(SearchContext context) { + protected ValuesSourceConfig resolveConfig(QueryShardContext queryShardContext) { ValueType valueType = this.valueType != null ? 
this.valueType : targetValueType; - return ValuesSourceConfig.resolve(context.getQueryShardContext(), + return ValuesSourceConfig.resolve(queryShardContext, valueType, field, script, missing, timeZone, format, this::resolveScriptAny); } - protected abstract ValuesSourceAggregatorFactory innerBuild(SearchContext context, ValuesSourceConfig config, - AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder) throws IOException; + protected abstract ValuesSourceAggregatorFactory innerBuild(QueryShardContext queryShardContext, + ValuesSourceConfig config, + AggregatorFactory parent, + Builder subFactoriesBuilder) throws IOException; @Override public final XContentBuilder internalXContent(XContentBuilder builder, Params params) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregatorFactory.java index 17da14aabf8..8d3d5de5535 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregatorFactory.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.aggregations.support; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; @@ -33,20 +34,20 @@ public abstract class ValuesSourceAggregatorFactory ext protected ValuesSourceConfig config; - public ValuesSourceAggregatorFactory(String name, ValuesSourceConfig config, SearchContext context, + public ValuesSourceAggregatorFactory(String name, ValuesSourceConfig config, QueryShardContext queryShardContext, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { - super(name, context, parent, subFactoriesBuilder, metaData); + super(name, queryShardContext, parent, subFactoriesBuilder, metaData); this.config = config; } @Override - public Aggregator createInternal(Aggregator parent, boolean collectsFromSingleBucket, - List pipelineAggregators, Map metaData) throws IOException { - VS vs = config.toValuesSource(context.getQueryShardContext(), this::resolveMissingAny); + public Aggregator createInternal(SearchContext searchContext, Aggregator parent, boolean collectsFromSingleBucket, + List pipelineAggregators, Map metaData) throws IOException { + VS vs = config.toValuesSource(queryShardContext, this::resolveMissingAny); if (vs == null) { - return createUnmapped(parent, pipelineAggregators, metaData); + return createUnmapped(searchContext, parent, pipelineAggregators, metaData); } - return doCreateInternal(vs, parent, collectsFromSingleBucket, pipelineAggregators, metaData); + return doCreateInternal(vs, searchContext, parent, collectsFromSingleBucket, pipelineAggregators, metaData); } /** @@ -64,11 +65,16 @@ public abstract class ValuesSourceAggregatorFactory ext return ValuesSource.Bytes.WithOrdinals.EMPTY; } - protected abstract Aggregator createUnmapped(Aggregator parent, - List pipelineAggregators, Map metaData) throws IOException; + protected abstract Aggregator createUnmapped(SearchContext searchContext, + Aggregator parent, + List pipelineAggregators, + Map metaData) throws IOException; - protected abstract Aggregator doCreateInternal(VS valuesSource, Aggregator parent, - boolean 
collectsFromSingleBucket, List pipelineAggregators, Map metaData) - throws IOException; + protected abstract Aggregator doCreateInternal(VS valuesSource, + SearchContext searchContext, + Aggregator parent, + boolean collectsFromSingleBucket, + List pipelineAggregators, + Map metaData) throws IOException; } diff --git a/server/src/main/java/org/elasticsearch/search/collapse/CollapseBuilder.java b/server/src/main/java/org/elasticsearch/search/collapse/CollapseBuilder.java index 2ebf413b140..3246680986d 100644 --- a/server/src/main/java/org/elasticsearch/search/collapse/CollapseBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/collapse/CollapseBuilder.java @@ -34,8 +34,7 @@ import org.elasticsearch.index.mapper.KeywordFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.NumberFieldMapper; import org.elasticsearch.index.query.InnerHitBuilder; -import org.elasticsearch.search.SearchContextException; -import org.elasticsearch.search.internal.SearchContext; +import org.elasticsearch.index.query.QueryShardContext; import java.io.IOException; import java.util.ArrayList; @@ -200,32 +199,22 @@ public class CollapseBuilder implements Writeable, ToXContentObject { return result; } - public CollapseContext build(SearchContext context) { - if (context.scrollContext() != null) { - throw new SearchContextException(context, "cannot use `collapse` in a scroll context"); - } - if (context.searchAfter() != null) { - throw new SearchContextException(context, "cannot use `collapse` in conjunction with `search_after`"); - } - if (context.rescore() != null && context.rescore().isEmpty() == false) { - throw new SearchContextException(context, "cannot use `collapse` in conjunction with `rescore`"); - } - - MappedFieldType fieldType = context.getQueryShardContext().fieldMapper(field); + public CollapseContext build(QueryShardContext queryShardContext) { + MappedFieldType fieldType = queryShardContext.fieldMapper(field); if (fieldType == null) { - throw new SearchContextException(context, "no mapping found for `" + field + "` in order to collapse on"); + throw new IllegalArgumentException("no mapping found for `" + field + "` in order to collapse on"); } if (fieldType instanceof KeywordFieldMapper.KeywordFieldType == false && fieldType instanceof NumberFieldMapper.NumberFieldType == false) { - throw new SearchContextException(context, "unknown type for collapse field `" + field + + throw new IllegalArgumentException("unknown type for collapse field `" + field + "`, only keywords and numbers are accepted"); } if (fieldType.hasDocValues() == false) { - throw new SearchContextException(context, "cannot collapse on field `" + field + "` without `doc_values`"); + throw new IllegalArgumentException("cannot collapse on field `" + field + "` without `doc_values`"); } if (fieldType.indexOptions() == IndexOptions.NONE && (innerHits != null && !innerHits.isEmpty())) { - throw new SearchContextException(context, "cannot expand `inner_hits` for collapse field `" + throw new IllegalArgumentException("cannot expand `inner_hits` for collapse field `" + field + "`, " + "only indexed field can retrieve `inner_hits`"); } diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsContext.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsContext.java index 48f2f1299c2..c622970dc13 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsContext.java +++ 
b/server/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsContext.java @@ -41,7 +41,6 @@ import java.io.IOException; import java.util.Arrays; import java.util.HashMap; import java.util.Map; -import java.util.Objects; /** * Context used for inner hits retrieval @@ -53,10 +52,6 @@ public final class InnerHitsContext { this.innerHits = new HashMap<>(); } - InnerHitsContext(Map innerHits) { - this.innerHits = Objects.requireNonNull(innerHits); - } - public Map getInnerHits() { return innerHits; } @@ -77,8 +72,6 @@ public final class InnerHitsContext { public abstract static class InnerHitSubContext extends SubSearchContext { private final String name; - protected final SearchContext context; - private InnerHitsContext childInnerHits; // TODO: when types are complete removed just use String instead for the id: private Uid uid; @@ -86,7 +79,6 @@ public final class InnerHitsContext { protected InnerHitSubContext(String name, SearchContext context) { super(context); this.name = name; - this.context = context; } public abstract TopDocsAndMaxScore[] topDocs(SearchHit[] hits) throws IOException; @@ -95,25 +87,12 @@ public final class InnerHitsContext { return name; } - @Override - public InnerHitsContext innerHits() { - return childInnerHits; - } - - public void setChildInnerHits(Map childInnerHits) { - this.childInnerHits = new InnerHitsContext(childInnerHits); - } - protected Weight createInnerHitQueryWeight() throws IOException { final boolean needsScores = size() != 0 && (sort() == null || sort().sort.needsScores()); - return context.searcher().createWeight(context.searcher().rewrite(query()), + return searcher().createWeight(searcher().rewrite(query()), needsScores ? ScoreMode.COMPLETE : ScoreMode.COMPLETE_NO_SCORES, 1f); } - public SearchContext parentSearchContext() { - return context; - } - public Uid getUid() { return uid; } diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsFetchSubPhase.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsFetchSubPhase.java index 4d34a3afa62..2e177e59e06 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsFetchSubPhase.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsFetchSubPhase.java @@ -23,6 +23,7 @@ import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.ScoreDoc; import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore; import org.elasticsearch.index.mapper.Uid; +import org.elasticsearch.index.query.InnerHitContextBuilder; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.fetch.FetchPhase; @@ -44,11 +45,16 @@ public final class InnerHitsFetchSubPhase implements FetchSubPhase { @Override public void hitsExecute(SearchContext context, SearchHit[] hits) throws IOException { - if ((context.innerHits() != null && context.innerHits().getInnerHits().size() > 0) == false) { + if (context.innerHits().isEmpty()) { return; } - for (Map.Entry entry : context.innerHits().getInnerHits().entrySet()) { + final InnerHitsContext innerHitsContext = new InnerHitsContext(); + for (Map.Entry entry : context.innerHits().entrySet()) { + entry.getValue().build(context, innerHitsContext); + } + + for (Map.Entry entry : innerHitsContext.getInnerHits().entrySet()) { InnerHitsContext.InnerHitSubContext innerHits = entry.getValue(); TopDocsAndMaxScore[] topDocs = innerHits.topDocs(hits); for (int i = 0; i < hits.length; i++) { diff --git 
a/server/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java b/server/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java index 10eb90afc04..1c6df1c7747 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java +++ b/server/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java @@ -31,6 +31,7 @@ import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.ObjectMapper; +import org.elasticsearch.index.query.InnerHitContextBuilder; import org.elasticsearch.index.query.ParsedQuery; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.shard.IndexShard; @@ -44,7 +45,6 @@ import org.elasticsearch.search.fetch.FetchPhase; import org.elasticsearch.search.fetch.FetchSearchResult; import org.elasticsearch.search.fetch.StoredFieldsContext; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; -import org.elasticsearch.search.fetch.subphase.InnerHitsContext; import org.elasticsearch.search.fetch.subphase.ScriptFieldsContext; import org.elasticsearch.search.fetch.subphase.highlight.SearchContextHighlight; import org.elasticsearch.search.lookup.SearchLookup; @@ -176,10 +176,15 @@ public abstract class FilteredSearchContext extends SearchContext { } @Override - public InnerHitsContext innerHits() { + public Map innerHits() { return in.innerHits(); } + @Override + public void innerHits(Map innerHits) { + in.innerHits(innerHits); + } + @Override public SuggestionSearchContext suggest() { return in.suggest(); diff --git a/server/src/main/java/org/elasticsearch/search/internal/SearchContext.java b/server/src/main/java/org/elasticsearch/search/internal/SearchContext.java index fba80d5f3c6..c8f55d3781c 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/SearchContext.java +++ b/server/src/main/java/org/elasticsearch/search/internal/SearchContext.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.search.internal; - import org.apache.lucene.search.Collector; import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.Query; @@ -37,6 +36,7 @@ import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.ObjectMapper; +import org.elasticsearch.index.query.InnerHitContextBuilder; import org.elasticsearch.index.query.ParsedQuery; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.shard.IndexShard; @@ -51,7 +51,6 @@ import org.elasticsearch.search.fetch.FetchSearchResult; import org.elasticsearch.search.fetch.StoredFieldsContext; import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; -import org.elasticsearch.search.fetch.subphase.InnerHitsContext; import org.elasticsearch.search.fetch.subphase.ScriptFieldsContext; import org.elasticsearch.search.fetch.subphase.highlight.SearchContextHighlight; import org.elasticsearch.search.lookup.SearchLookup; @@ -87,7 +86,6 @@ public abstract class SearchContext extends AbstractRefCounted implements Releas private Map> clearables = null; private final AtomicBoolean closed = new AtomicBoolean(false); - private InnerHitsContext innerHitsContext; protected SearchContext() { super("search_context"); @@ -164,12 
+162,9 @@ public abstract class SearchContext extends AbstractRefCounted implements Releas public abstract void highlight(SearchContextHighlight highlight); - public InnerHitsContext innerHits() { - if (innerHitsContext == null) { - innerHitsContext = new InnerHitsContext(); - } - return innerHitsContext; - } + public abstract void innerHits(Map innerHits); + + public abstract Map innerHits(); public abstract SuggestionSearchContext suggest(); diff --git a/server/src/main/java/org/elasticsearch/search/internal/SubSearchContext.java b/server/src/main/java/org/elasticsearch/search/internal/SubSearchContext.java index 7d61eb6870b..73e4c252b2c 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/SubSearchContext.java +++ b/server/src/main/java/org/elasticsearch/search/internal/SubSearchContext.java @@ -20,7 +20,9 @@ package org.elasticsearch.search.internal; import org.apache.lucene.search.Query; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.index.query.InnerHitContextBuilder; import org.elasticsearch.index.query.ParsedQuery; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.aggregations.SearchContextAggregations; import org.elasticsearch.search.collapse.CollapseContext; import org.elasticsearch.search.fetch.FetchSearchResult; @@ -34,7 +36,9 @@ import org.elasticsearch.search.rescore.RescoreContext; import org.elasticsearch.search.sort.SortAndFormats; import org.elasticsearch.search.suggest.SuggestionSearchContext; +import java.util.Collections; import java.util.List; +import java.util.Map; public class SubSearchContext extends FilteredSearchContext { @@ -42,6 +46,8 @@ public class SubSearchContext extends FilteredSearchContext { // the to hits are returned per bucket. private static final int DEFAULT_SIZE = 3; + private final QueryShardContext queryShardContext; + private int from; private int size = DEFAULT_SIZE; private SortAndFormats sort; @@ -60,6 +66,7 @@ public class SubSearchContext extends FilteredSearchContext { private FetchSourceContext fetchSourceContext; private DocValueFieldsContext docValueFieldsContext; private SearchContextHighlight highlight; + private Map innerHits = Collections.emptyMap(); private boolean explain; private boolean trackScores; @@ -70,6 +77,9 @@ public class SubSearchContext extends FilteredSearchContext { super(context); this.fetchSearchResult = new FetchSearchResult(); this.querySearchResult = new QuerySearchResult(); + // we clone the query shard context in the sub context because the original one + // might be frozen at this point. 
+ this.queryShardContext = new QueryShardContext(context.getQueryShardContext()); } @Override @@ -80,6 +90,11 @@ public class SubSearchContext extends FilteredSearchContext { public void preProcess(boolean rewrite) { } + @Override + public QueryShardContext getQueryShardContext() { + return queryShardContext; + } + @Override public Query buildFilteredQuery(Query query) { throw new UnsupportedOperationException("this context should be read only"); @@ -357,4 +372,13 @@ public class SubSearchContext extends FilteredSearchContext { throw new UnsupportedOperationException("Not supported"); } + @Override + public Map innerHits() { + return innerHits; + } + + @Override + public void innerHits(Map innerHits) { + this.innerHits = innerHits; + } } diff --git a/server/src/main/java/org/elasticsearch/search/sort/SortBuilder.java b/server/src/main/java/org/elasticsearch/search/sort/SortBuilder.java index e1411d5b466..6310db69b20 100644 --- a/server/src/main/java/org/elasticsearch/search/sort/SortBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/sort/SortBuilder.java @@ -195,7 +195,7 @@ public abstract class SortBuilder> implements NamedWrit } else { parentQuery = objectMapper.nestedTypeFilter(); } - return new Nested(context.bitsetFilter(parentQuery), childQuery, nestedSort, context::newCachedSearcher); + return new Nested(context.bitsetFilter(parentQuery), childQuery, nestedSort, context.searcher()); } private static Query resolveNestedQuery(QueryShardContext context, NestedSortBuilder nestedSort, Query parentQuery) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java index f313aff03b9..083382b74b0 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java @@ -23,7 +23,6 @@ import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.lucene.util.SetOnce; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequestValidationException; @@ -53,9 +52,8 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.index.engine.Engine; -import org.elasticsearch.index.engine.SnapshotFailedEngineException; import org.elasticsearch.index.shard.IndexEventListener; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardState; @@ -79,7 +77,6 @@ import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; -import java.util.concurrent.Executor; import java.util.function.Function; import java.util.stream.Collectors; @@ -156,7 +153,10 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements if ((previousSnapshots == null && currentSnapshots != null) || (previousSnapshots != null && previousSnapshots.equals(currentSnapshots) == false)) { synchronized (shardSnapshots) { - processIndexShardSnapshots(currentSnapshots); + 
cancelRemoved(currentSnapshots); + if (currentSnapshots != null) { + startNewSnapshots(currentSnapshots); + } } } @@ -202,18 +202,6 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements } } - /** - * Checks if any new shards should be snapshotted on this node - * - * @param snapshotsInProgress Current snapshots in progress in cluster state - */ - private void processIndexShardSnapshots(SnapshotsInProgress snapshotsInProgress) { - cancelRemoved(snapshotsInProgress); - if (snapshotsInProgress != null) { - startNewSnapshots(snapshotsInProgress); - } - } - private void cancelRemoved(@Nullable SnapshotsInProgress snapshotsInProgress) { // First, remove snapshots that are no longer there Iterator>> it = shardSnapshots.entrySet().iterator(); @@ -268,28 +256,14 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements Map snapshotShards = shardSnapshots.getOrDefault(snapshot, emptyMap()); for (ObjectObjectCursor shard : entry.shards()) { final IndexShardSnapshotStatus snapshotStatus = snapshotShards.get(shard.key); - if (snapshotStatus != null) { - final IndexShardSnapshotStatus.Copy lastSnapshotStatus = - snapshotStatus.abortIfNotCompleted("snapshot has been aborted"); - final Stage stage = lastSnapshotStatus.getStage(); - if (stage == Stage.FINALIZE) { - logger.debug("[{}] trying to cancel snapshot on shard [{}] that is finalizing, " + - "letting it finish", snapshot, shard.key); - } else if (stage == Stage.DONE) { - logger.debug("[{}] trying to cancel snapshot on the shard [{}] that is already done, " + - "updating status on the master", snapshot, shard.key); - notifySuccessfulSnapshotShard(snapshot, shard.key); - } else if (stage == Stage.FAILURE) { - logger.debug("[{}] trying to cancel snapshot on the shard [{}] that has already failed, " + - "updating status on the master", snapshot, shard.key); - notifyFailedSnapshotShard(snapshot, shard.key, lastSnapshotStatus.getFailure()); - } - } else { + if (snapshotStatus == null) { // due to CS batching we might have missed the INIT state and straight went into ABORTED // notify master that abort has completed by moving to FAILED if (shard.value.state() == ShardState.ABORTED) { notifyFailedSnapshotShard(snapshot, shard.key, shard.value.reason()); } + } else { + snapshotStatus.abortIfNotCompleted("snapshot has been aborted"); } } } @@ -297,46 +271,33 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements } private void startNewShards(SnapshotsInProgress.Entry entry, Map startedShards) { - final Snapshot snapshot = entry.snapshot(); - final Map indicesMap = entry.indices().stream().collect(Collectors.toMap(IndexId::getName, Function.identity())); - final Executor executor = threadPool.executor(ThreadPool.Names.SNAPSHOT); - for (final Map.Entry shardEntry : startedShards.entrySet()) { - final ShardId shardId = shardEntry.getKey(); - final IndexId indexId = indicesMap.get(shardId.getIndexName()); - assert indexId != null; - executor.execute(new AbstractRunnable() { - - private final SetOnce failure = new SetOnce<>(); - - @Override - public void doRun() { - final IndexShard indexShard = - indicesService.indexServiceSafe(shardId.getIndex()).getShardOrNull(shardId.id()); - snapshot(indexShard, snapshot, indexId, shardEntry.getValue()); - } - - @Override - public void onFailure(Exception e) { - logger.warn(() -> new ParameterizedMessage("[{}][{}] failed to snapshot shard", shardId, snapshot), e); - failure.set(e); - } - - @Override - public void onRejection(Exception e) { - 
failure.set(e); - } - - @Override - public void onAfter() { - final Exception exception = failure.get(); - if (exception != null) { - notifyFailedSnapshotShard(snapshot, shardId, ExceptionsHelper.detailedMessage(exception)); - } else { + threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(() -> { + final Snapshot snapshot = entry.snapshot(); + final Map indicesMap = + entry.indices().stream().collect(Collectors.toMap(IndexId::getName, Function.identity())); + for (final Map.Entry shardEntry : startedShards.entrySet()) { + final ShardId shardId = shardEntry.getKey(); + final IndexShardSnapshotStatus snapshotStatus = shardEntry.getValue(); + final IndexId indexId = indicesMap.get(shardId.getIndexName()); + assert indexId != null; + snapshot(shardId, snapshot, indexId, snapshotStatus, new ActionListener() { + @Override + public void onResponse(final Void aVoid) { + if (logger.isDebugEnabled()) { + final IndexShardSnapshotStatus.Copy lastSnapshotStatus = snapshotStatus.asCopy(); + logger.debug("snapshot ({}) completed to {} with {}", snapshot, snapshot.getRepository(), lastSnapshotStatus); + } notifySuccessfulSnapshotShard(snapshot, shardId); } - } - }); - } + + @Override + public void onFailure(Exception e) { + logger.warn(() -> new ParameterizedMessage("[{}][{}] failed to snapshot shard", shardId, snapshot), e); + notifyFailedSnapshotShard(snapshot, shardId, ExceptionsHelper.detailedMessage(e)); + } + }); + } + }); } /** @@ -345,37 +306,37 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements * @param snapshot snapshot * @param snapshotStatus snapshot status */ - private void snapshot(final IndexShard indexShard, final Snapshot snapshot, final IndexId indexId, - final IndexShardSnapshotStatus snapshotStatus) { - final ShardId shardId = indexShard.shardId(); - if (indexShard.routingEntry().primary() == false) { - throw new IndexShardSnapshotFailedException(shardId, "snapshot should be performed only on primary"); - } - if (indexShard.routingEntry().relocating()) { - // do not snapshot when in the process of relocation of primaries so we won't get conflicts - throw new IndexShardSnapshotFailedException(shardId, "cannot snapshot while relocating"); - } - - final IndexShardState indexShardState = indexShard.state(); - if (indexShardState == IndexShardState.CREATED || indexShardState == IndexShardState.RECOVERING) { - // shard has just been created, or still recovering - throw new IndexShardSnapshotFailedException(shardId, "shard didn't fully recover yet"); - } - - final Repository repository = repositoriesService.repository(snapshot.getRepository()); + private void snapshot(final ShardId shardId, final Snapshot snapshot, final IndexId indexId, + final IndexShardSnapshotStatus snapshotStatus, ActionListener listener) { try { - // we flush first to make sure we get the latest writes snapshotted - try (Engine.IndexCommitRef snapshotRef = indexShard.acquireLastIndexCommit(true)) { - repository.snapshotShard(indexShard, snapshot.getSnapshotId(), indexId, snapshotRef.getIndexCommit(), snapshotStatus); - if (logger.isDebugEnabled()) { - final IndexShardSnapshotStatus.Copy lastSnapshotStatus = snapshotStatus.asCopy(); - logger.debug("snapshot ({}) completed to {} with {}", snapshot, repository, lastSnapshotStatus); - } + final IndexShard indexShard = indicesService.indexServiceSafe(shardId.getIndex()).getShardOrNull(shardId.id()); + if (indexShard.routingEntry().primary() == false) { + throw new IndexShardSnapshotFailedException(shardId, "snapshot should be performed 
only on primary"); + } + if (indexShard.routingEntry().relocating()) { + // do not snapshot when in the process of relocation of primaries so we won't get conflicts + throw new IndexShardSnapshotFailedException(shardId, "cannot snapshot while relocating"); + } + + final IndexShardState indexShardState = indexShard.state(); + if (indexShardState == IndexShardState.CREATED || indexShardState == IndexShardState.RECOVERING) { + // shard has just been created, or still recovering + throw new IndexShardSnapshotFailedException(shardId, "shard didn't fully recover yet"); + } + + final Repository repository = repositoriesService.repository(snapshot.getRepository()); + Engine.IndexCommitRef snapshotRef = null; + try { + // we flush first to make sure we get the latest writes snapshotted + snapshotRef = indexShard.acquireLastIndexCommit(true); + repository.snapshotShard(indexShard.store(), indexShard.mapperService(), snapshot.getSnapshotId(), indexId, + snapshotRef.getIndexCommit(), snapshotStatus, ActionListener.runBefore(listener, snapshotRef::close)); + } catch (Exception e) { + IOUtils.close(snapshotRef); + throw e; } - } catch (SnapshotFailedEngineException | IndexShardSnapshotFailedException e) { - throw e; } catch (Exception e) { - throw new IndexShardSnapshotFailedException(shardId, "Failed to snapshot", e); + listener.onFailure(e); } } diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java index e9b7915cf98..5af6e397a55 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java @@ -26,6 +26,7 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotRequest; @@ -68,6 +69,7 @@ import org.elasticsearch.repositories.IndexId; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.RepositoryData; +import org.elasticsearch.repositories.RepositoryException; import org.elasticsearch.repositories.RepositoryMissingException; import org.elasticsearch.threadpool.ThreadPool; @@ -103,17 +105,24 @@ import static org.elasticsearch.cluster.SnapshotsInProgress.completed; * the {@link #beginSnapshot(ClusterState, SnapshotsInProgress.Entry, boolean, ActionListener)} method kicks in and initializes * the snapshot in the repository and then populates list of shards that needs to be snapshotted in cluster state *
<li>Each data node is watching for these shards and when new shards scheduled for snapshotting appear in the cluster state, data nodes
- * start processing them through {@link SnapshotShardsService#processIndexShardSnapshots(SnapshotsInProgress)} method</li>
+ * start processing them through {@link SnapshotShardsService#startNewSnapshots} method</li>
 * <li>Once shard snapshot is created data node updates state of the shard in the cluster state using
 * the {@link SnapshotShardsService#sendSnapshotShardUpdate(Snapshot, ShardId, ShardSnapshotStatus)} method</li>
 * <li>When last shard is completed master node in {@link SnapshotShardsService#innerUpdateSnapshotState} method marks the snapshot
 * as completed</li>
- * <li>After cluster state is updated, the {@link #endSnapshot(SnapshotsInProgress.Entry)} finalizes snapshot in the repository,
+ * <li>After cluster state is updated, the {@link #endSnapshot(SnapshotsInProgress.Entry, MetaData)} finalizes snapshot in the repository,
 * notifies all {@link #snapshotCompletionListeners} that snapshot is completed, and finally calls
 * {@link #removeSnapshotFromClusterState(Snapshot, SnapshotInfo, Exception)} to remove snapshot from cluster state</li>
  • * */ public class SnapshotsService extends AbstractLifecycleComponent implements ClusterStateApplier { + + /** + * Minimum node version which does not use {@link Repository#initializeSnapshot(SnapshotId, List, MetaData)} to write snapshot metadata + * when starting a snapshot. + */ + public static final Version NO_REPO_INITIALIZE_VERSION = Version.V_7_5_0; + private static final Logger logger = LogManager.getLogger(SnapshotsService.class); private final ClusterService clusterService; @@ -398,24 +407,29 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus assert initializingSnapshots.contains(snapshot.snapshot()); Repository repository = repositoriesService.repository(snapshot.snapshot().getRepository()); - MetaData metaData = clusterState.metaData(); - if (!snapshot.includeGlobalState()) { - // Remove global state from the cluster state - MetaData.Builder builder = MetaData.builder(); - for (IndexId index : snapshot.indices()) { - builder.put(metaData.index(index.getName()), false); - } - metaData = builder.build(); + if (repository.isReadOnly()) { + throw new RepositoryException(repository.getMetadata().name(), "cannot create snapshot in a readonly repository"); + } + final String snapshotName = snapshot.snapshot().getSnapshotId().getName(); + // check if the snapshot name already exists in the repository + if (repository.getRepositoryData().getSnapshotIds().stream().anyMatch(s -> s.getName().equals(snapshotName))) { + throw new InvalidSnapshotNameException( + repository.getMetadata().name(), snapshotName, "snapshot with the same name already exists"); + } + if (clusterState.nodes().getMinNodeVersion().onOrAfter(NO_REPO_INITIALIZE_VERSION) == false) { + // In mixed version clusters we initialize the snapshot in the repository so that in case of a master failover to an + // older version master node snapshot finalization (that assumes initializeSnapshot was called) produces a valid + // snapshot. 
+ repository.initializeSnapshot( + snapshot.snapshot().getSnapshotId(), snapshot.indices(), metaDataForSnapshot(snapshot, clusterState.metaData())); } - - repository.initializeSnapshot(snapshot.snapshot().getSnapshotId(), snapshot.indices(), metaData); snapshotCreated = true; logger.info("snapshot [{}] started", snapshot.snapshot()); if (snapshot.indices().isEmpty()) { // No indices in this snapshot - we are done userCreateSnapshotListener.onResponse(snapshot.snapshot()); - endSnapshot(snapshot); + endSnapshot(snapshot, clusterState.metaData()); return; } clusterService.submitStateUpdateTask("update_snapshot [" + snapshot.snapshot() + "]", new ClusterStateUpdateTask() { @@ -498,7 +512,7 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus assert snapshotsInProgress != null; final SnapshotsInProgress.Entry entry = snapshotsInProgress.snapshot(snapshot.snapshot()); assert entry != null; - endSnapshot(entry); + endSnapshot(entry, newState.metaData()); } } }); @@ -545,27 +559,41 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus } private void cleanupAfterError(Exception exception) { - if(snapshotCreated) { - try { - repositoriesService.repository(snapshot.snapshot().getRepository()) - .finalizeSnapshot(snapshot.snapshot().getSnapshotId(), - snapshot.indices(), - snapshot.startTime(), - ExceptionsHelper.detailedMessage(exception), + threadPool.generic().execute(() -> { + if (snapshotCreated) { + try { + repositoriesService.repository(snapshot.snapshot().getRepository()) + .finalizeSnapshot(snapshot.snapshot().getSnapshotId(), + snapshot.indices(), + snapshot.startTime(), + ExceptionsHelper.detailedMessage(exception), 0, Collections.emptyList(), snapshot.getRepositoryStateId(), snapshot.includeGlobalState(), + metaDataForSnapshot(snapshot, clusterService.state().metaData()), snapshot.userMetadata()); } catch (Exception inner) { inner.addSuppressed(exception); logger.warn(() -> new ParameterizedMessage("[{}] failed to close snapshot in repository", snapshot.snapshot()), inner); + } } - } - userCreateSnapshotListener.onFailure(e); + userCreateSnapshotListener.onFailure(e); + }); } + } + private static MetaData metaDataForSnapshot(SnapshotsInProgress.Entry snapshot, MetaData metaData) { + if (snapshot.includeGlobalState() == false) { + // Remove global state from the cluster state + MetaData.Builder builder = MetaData.builder(); + for (IndexId index : snapshot.indices()) { + builder.put(metaData.index(index.getName()), false); + } + metaData = builder.build(); + } + return metaData; } private static SnapshotInfo inProgressSnapshot(SnapshotsInProgress.Entry entry) { @@ -713,7 +741,7 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus entry -> entry.state().completed() || initializingSnapshots.contains(entry.snapshot()) == false && (entry.state() == State.INIT || completed(entry.shards().values())) - ).forEach(this::endSnapshot); + ).forEach(entry -> endSnapshot(entry, event.state().metaData())); } if (newMaster) { finalizeSnapshotDeletionFromPreviousMaster(event); @@ -960,7 +988,7 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus * * @param entry snapshot */ - private void endSnapshot(final SnapshotsInProgress.Entry entry) { + private void endSnapshot(SnapshotsInProgress.Entry entry, MetaData metaData) { if (endingSnapshots.add(entry.snapshot()) == false) { return; } @@ -988,6 +1016,7 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus 
unmodifiableList(shardFailures), entry.getRepositoryStateId(), entry.includeGlobalState(), + metaDataForSnapshot(entry, metaData), entry.userMetadata()); removeSnapshotFromClusterState(snapshot, snapshotInfo, null); logger.info("snapshot [{}] completed with state [{}]", snapshot, snapshotInfo.state()); diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java index 9fb1a84b849..78bbf9775cc 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java @@ -143,7 +143,10 @@ public abstract class RemoteClusterAware { }; /** - * A proxy address for the remote cluster. + * A proxy address for the remote cluster. By default this is not set, meaning that Elasticsearch will connect directly to the nodes in + * the remote cluster using their publish addresses. If this setting is set to an IP address or hostname then Elasticsearch will connect + * to the nodes in the remote cluster using this address instead. Use of this setting is not recommended and it is deliberately + * undocumented as it does not work well with all proxies. */ public static final Setting.AffixSetting REMOTE_CLUSTERS_PROXY = Setting.affixKeySetting( "cluster.remote.", diff --git a/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java b/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java index bae52c6f59f..4846a75b9ba 100644 --- a/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java +++ b/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java @@ -62,6 +62,7 @@ import org.elasticsearch.index.Index; import org.elasticsearch.index.engine.RecoveryEngineException; import org.elasticsearch.index.query.QueryShardException; import org.elasticsearch.index.seqno.RetentionLeaseAlreadyExistsException; +import org.elasticsearch.index.seqno.RetentionLeaseInvalidRetainingSeqNoException; import org.elasticsearch.index.seqno.RetentionLeaseNotFoundException; import org.elasticsearch.index.shard.IllegalIndexShardStateException; import org.elasticsearch.index.shard.IndexShardState; @@ -818,6 +819,7 @@ public class ExceptionSerializationTests extends ESTestCase { ids.put(153, RetentionLeaseAlreadyExistsException.class); ids.put(154, RetentionLeaseNotFoundException.class); ids.put(155, ShardNotInPrimaryModeException.class); + ids.put(156, RetentionLeaseInvalidRetainingSeqNoException.class); Map, Integer> reverse = new HashMap<>(); for (Map.Entry> entry : ids.entrySet()) { diff --git a/server/src/test/java/org/elasticsearch/action/ActionListenerTests.java b/server/src/test/java/org/elasticsearch/action/ActionListenerTests.java index cd3735b4843..4f9b63fb75e 100644 --- a/server/src/test/java/org/elasticsearch/action/ActionListenerTests.java +++ b/server/src/test/java/org/elasticsearch/action/ActionListenerTests.java @@ -171,6 +171,23 @@ public class ActionListenerTests extends ESTestCase { } } + public void testRunBefore() { + { + AtomicBoolean afterSuccess = new AtomicBoolean(); + ActionListener listener = + ActionListener.runBefore(ActionListener.wrap(r -> {}, e -> {}), () -> afterSuccess.set(true)); + listener.onResponse(null); + assertThat(afterSuccess.get(), equalTo(true)); + } + { + AtomicBoolean afterFailure = new AtomicBoolean(); + ActionListener listener = + ActionListener.runBefore(ActionListener.wrap(r -> {}, e -> {}), () -> afterFailure.set(true)); + 
listener.onFailure(null); + assertThat(afterFailure.get(), equalTo(true)); + } + } + public void testNotifyOnce() { AtomicInteger onResponseTimes = new AtomicInteger(); AtomicInteger onFailureTimes = new AtomicInteger(); diff --git a/server/src/test/java/org/elasticsearch/action/bulk/RetryTests.java b/server/src/test/java/org/elasticsearch/action/bulk/RetryTests.java index f5d881e2b04..bfbccf0b142 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/RetryTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/RetryTests.java @@ -26,6 +26,7 @@ import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.client.NoOpClient; import org.junit.After; @@ -226,7 +227,8 @@ public class RetryTests extends ESTestCase { } private BulkItemResponse successfulResponse() { - return new BulkItemResponse(1, OpType.DELETE, new DeleteResponse(null, null, null, 0, 0, 0, false)); + return new BulkItemResponse(1, OpType.DELETE, new DeleteResponse( + new ShardId("test", "test", 0), "_doc", "test", 0, 0, 0, false)); } private BulkItemResponse failedResponse() { diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java index f213b523fbf..4bab8636bbf 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java @@ -27,6 +27,7 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.AtomicArray; @@ -106,6 +107,9 @@ public class TransportBulkActionIndicesThatCannotBeCreatedTests extends ESTestCa ClusterState state = mock(ClusterState.class); when(state.getMetaData()).thenReturn(MetaData.EMPTY_META_DATA); when(clusterService.state()).thenReturn(state); + DiscoveryNode localNode = mock(DiscoveryNode.class); + when(clusterService.localNode()).thenReturn(localNode); + when(localNode.isIngestNode()).thenReturn(randomBoolean()); final ThreadPool threadPool = mock(ThreadPool.class); final ExecutorService direct = EsExecutors.newDirectExecutorService(); when(threadPool.executor(anyString())).thenReturn(direct); diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java index 22d6c2f722d..8acb3e8cc93 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java @@ -240,6 +240,7 @@ public class TransportShardBulkActionTests extends IndexShardTestCase { Engine.IndexResult success = new FakeIndexResult(1, 1, 13, true, resultLocation); IndexShard shard = 
mock(IndexShard.class); + when(shard.shardId()).thenReturn(shardId); when(shard.applyIndexOperationOnPrimary(anyLong(), any(), any(), anyLong(), anyLong(), anyLong(), anyBoolean())) .thenReturn(mappingUpdate); @@ -583,6 +584,7 @@ public class TransportShardBulkActionTests extends IndexShardTestCase { when(shard.applyIndexOperationOnPrimary(anyLong(), any(), any(), anyLong(), anyLong(), anyLong(), anyBoolean())) .thenReturn(indexResult); when(shard.indexSettings()).thenReturn(indexSettings); + when(shard.shardId()).thenReturn(shardId); UpdateHelper updateHelper = mock(UpdateHelper.class); when(updateHelper.prepare(any(), eq(shard), any())).thenReturn( @@ -629,6 +631,7 @@ public class TransportShardBulkActionTests extends IndexShardTestCase { IndexShard shard = mock(IndexShard.class); when(shard.applyDeleteOperationOnPrimary(anyLong(), any(), any(), any(), anyLong(), anyLong())).thenReturn(deleteResult); when(shard.indexSettings()).thenReturn(indexSettings); + when(shard.shardId()).thenReturn(shardId); UpdateHelper updateHelper = mock(UpdateHelper.class); when(updateHelper.prepare(any(), eq(shard), any())).thenReturn( @@ -783,6 +786,7 @@ public class TransportShardBulkActionTests extends IndexShardTestCase { } }); when(shard.indexSettings()).thenReturn(indexSettings); + when(shard.shardId()).thenReturn(shardId); UpdateHelper updateHelper = mock(UpdateHelper.class); when(updateHelper.prepare(any(), eq(shard), any())).thenReturn( @@ -814,7 +818,7 @@ public class TransportShardBulkActionTests extends IndexShardTestCase { private void randomlySetIgnoredPrimaryResponse(BulkItemRequest primaryRequest) { if (randomBoolean()) { // add a response to the request and thereby check that it is ignored for the primary. - primaryRequest.setPrimaryResponse(new BulkItemResponse(0, DocWriteRequest.OpType.INDEX, new IndexResponse(null, "_doc", + primaryRequest.setPrimaryResponse(new BulkItemResponse(0, DocWriteRequest.OpType.INDEX, new IndexResponse(shardId, "_doc", "ignore-primary-response-on-primary", 42, 42, 42, false))); } } diff --git a/server/src/test/java/org/elasticsearch/action/ingest/SimulateExecutionServiceTests.java b/server/src/test/java/org/elasticsearch/action/ingest/SimulateExecutionServiceTests.java index 66cc4a1fcc6..7d6f47f1bf7 100644 --- a/server/src/test/java/org/elasticsearch/action/ingest/SimulateExecutionServiceTests.java +++ b/server/src/test/java/org/elasticsearch/action/ingest/SimulateExecutionServiceTests.java @@ -119,8 +119,7 @@ public class SimulateExecutionServiceTests extends ESTestCase { public void testExecuteVerboseItemExceptionWithoutOnFailure() throws Exception { TestProcessor processor1 = new TestProcessor("processor_0", "mock", ingestDocument -> {}); - TestProcessor processor2 = new TestProcessor("processor_1", "mock", - ingestDocument -> { throw new RuntimeException("processor failed"); }); + TestProcessor processor2 = new TestProcessor("processor_1", "mock", new RuntimeException("processor failed")); TestProcessor processor3 = new TestProcessor("processor_2", "mock", ingestDocument -> {}); Pipeline pipeline = new Pipeline("_id", "_description", version, new CompoundProcessor(processor1, processor2, processor3)); CountDownLatch latch = new CountDownLatch(1); @@ -151,8 +150,7 @@ public class SimulateExecutionServiceTests extends ESTestCase { } public void testExecuteVerboseItemWithOnFailure() throws Exception { - TestProcessor processor1 = new TestProcessor("processor_0", "mock", - ingestDocument -> { throw new RuntimeException("processor failed"); }); + TestProcessor 
processor1 = new TestProcessor("processor_0", "mock", new RuntimeException("processor failed")); TestProcessor processor2 = new TestProcessor("processor_1", "mock", ingestDocument -> {}); TestProcessor processor3 = new TestProcessor("processor_2", "mock", ingestDocument -> {}); Pipeline pipeline = new Pipeline("_id", "_description", version, @@ -197,7 +195,7 @@ public class SimulateExecutionServiceTests extends ESTestCase { public void testExecuteVerboseItemExceptionWithIgnoreFailure() throws Exception { RuntimeException exception = new RuntimeException("processor failed"); - TestProcessor testProcessor = new TestProcessor("processor_0", "mock", ingestDocument -> { throw exception; }); + TestProcessor testProcessor = new TestProcessor("processor_0", "mock", exception); CompoundProcessor processor = new CompoundProcessor(true, Collections.singletonList(testProcessor), Collections.emptyList()); Pipeline pipeline = new Pipeline("_id", "_description", version, new CompoundProcessor(processor)); CountDownLatch latch = new CountDownLatch(1); diff --git a/server/src/test/java/org/elasticsearch/bwcompat/RecoveryWithUnsupportedIndicesIT.java b/server/src/test/java/org/elasticsearch/bwcompat/RecoveryWithUnsupportedIndicesIT.java index 8ef27c8e939..5d97855229d 100644 --- a/server/src/test/java/org/elasticsearch/bwcompat/RecoveryWithUnsupportedIndicesIT.java +++ b/server/src/test/java/org/elasticsearch/bwcompat/RecoveryWithUnsupportedIndicesIT.java @@ -28,9 +28,11 @@ import java.util.List; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.TestUtil; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.gateway.CorruptStateException; import org.elasticsearch.test.ESIntegTestCase; import static org.hamcrest.Matchers.containsString; @@ -42,7 +44,7 @@ public class RecoveryWithUnsupportedIndicesIT extends ESIntegTestCase { /** * Return settings that could be used to start a node that has the given zipped home directory. 
*/ - protected Settings prepareBackwardsDataDir(Path backwardsIndex) throws IOException { + private Settings prepareBackwardsDataDir(Path backwardsIndex) throws IOException { Path indexDir = createTempDir(); Path dataDir = indexDir.resolve("data"); try (InputStream stream = Files.newInputStream(backwardsIndex)) { @@ -86,7 +88,8 @@ public class RecoveryWithUnsupportedIndicesIT extends ESIntegTestCase { logger.info("Checking static index {}", indexName); Settings nodeSettings = prepareBackwardsDataDir(getDataPath("/indices/bwc").resolve(indexName + ".zip")); - assertThat(expectThrows(Exception.class, () -> internalCluster().startNode(nodeSettings)) - .getCause().getCause().getMessage(), containsString("Format version is not supported")); + assertThat(ExceptionsHelper.unwrap( + expectThrows(Exception.class, () -> internalCluster().startNode(nodeSettings)), CorruptStateException.class).getMessage(), + containsString("Format version is not supported")); } } diff --git a/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodesTests.java b/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodesTests.java index 2cdbd95fc63..d03c132eda0 100644 --- a/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodesTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodesTests.java @@ -44,6 +44,7 @@ import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.Matchers.arrayContainingInAnyOrder; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.oneOf; public class DiscoveryNodesTests extends ESTestCase { @@ -163,6 +164,35 @@ public class DiscoveryNodesTests extends ESTestCase { assertEquals(sortedNodes, returnedNodes); } + public void testDeltaListsMultipleNodes() { + final List discoveryNodes = randomNodes(3); + + final DiscoveryNodes nodes0 = DiscoveryNodes.builder().add(discoveryNodes.get(0)).build(); + final DiscoveryNodes nodes01 = DiscoveryNodes.builder(nodes0).add(discoveryNodes.get(1)).build(); + final DiscoveryNodes nodes012 = DiscoveryNodes.builder(nodes01).add(discoveryNodes.get(2)).build(); + + assertThat(nodes01.delta(nodes0).shortSummary(), equalTo("added {" + discoveryNodes.get(1) + "}")); + assertThat(nodes012.delta(nodes0).shortSummary(), oneOf( + "added {" + discoveryNodes.get(1) + "," + discoveryNodes.get(2) + "}", + "added {" + discoveryNodes.get(2) + "," + discoveryNodes.get(1) + "}")); + + assertThat(nodes0.delta(nodes01).shortSummary(), equalTo("removed {" + discoveryNodes.get(1) + "}")); + assertThat(nodes0.delta(nodes012).shortSummary(), oneOf( + "removed {" + discoveryNodes.get(1) + "," + discoveryNodes.get(2) + "}", + "removed {" + discoveryNodes.get(2) + "," + discoveryNodes.get(1) + "}")); + + final DiscoveryNodes nodes01Local = DiscoveryNodes.builder(nodes01).localNodeId(discoveryNodes.get(1).getId()).build(); + final DiscoveryNodes nodes02Local = DiscoveryNodes.builder(nodes012).localNodeId(discoveryNodes.get(1).getId()).build(); + + assertThat(nodes01Local.delta(nodes0).shortSummary(), equalTo("")); + assertThat(nodes02Local.delta(nodes0).shortSummary(), equalTo("added {" + discoveryNodes.get(2) + "}")); + + assertThat(nodes0.delta(nodes01Local).shortSummary(), equalTo("removed {" + discoveryNodes.get(1) + "}")); + assertThat(nodes0.delta(nodes02Local).shortSummary(), oneOf( + "removed {" + discoveryNodes.get(1) + "," + discoveryNodes.get(2) + "}", + "removed {" + discoveryNodes.get(2) + "," + discoveryNodes.get(1) + 
"}")); + } + public void testDeltas() { Set nodesA = new HashSet<>(); nodesA.addAll(randomNodes(1 + randomInt(10))); diff --git a/server/src/test/java/org/elasticsearch/common/geo/GeometryIndexerTests.java b/server/src/test/java/org/elasticsearch/common/geo/GeometryIndexerTests.java index 5e5b2b688ea..47b02ea6981 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/GeometryIndexerTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/GeometryIndexerTests.java @@ -72,7 +72,7 @@ public class GeometryIndexerTests extends ESTestCase { new Point(2, 1), new Point(4, 3), new MultiLine(Arrays.asList( new Line(new double[]{160, 180}, new double[]{10, 15}), - new Line(new double[]{180, -160}, new double[]{15, 20})) + new Line(new double[]{-180, -160}, new double[]{15, 20})) )) ); assertEquals(indexed, indexer.prepareForIndexing(collection)); @@ -87,7 +87,25 @@ public class GeometryIndexerTests extends ESTestCase { line = new Line(new double[]{160, 200}, new double[]{10, 20}); indexed = new MultiLine(Arrays.asList( new Line(new double[]{160, 180}, new double[]{10, 15}), - new Line(new double[]{180, -160}, new double[]{15, 20})) + new Line(new double[]{-180, -160}, new double[]{15, 20})) + ); + + assertEquals(indexed, indexer.prepareForIndexing(line)); + + line = new Line(new double[]{200, 160}, new double[]{10, 20}); + indexed = new MultiLine(Arrays.asList( + new Line(new double[]{-160, -180}, new double[]{10, 15}), + new Line(new double[]{180, 160}, new double[]{15, 20})) + ); + + assertEquals(indexed, indexer.prepareForIndexing(line)); + + line = new Line(new double[]{160, 200, 160}, new double[]{0, 10, 20}); + indexed = new MultiLine(Arrays.asList( + new Line(new double[]{160, 180}, new double[]{0, 5}), + new Line(new double[]{-180, -160, -180}, new double[]{5, 10, 15}), + new Line(new double[]{180, 160}, new double[]{15, 20}) + ) ); assertEquals(indexed, indexer.prepareForIndexing(line)); @@ -106,7 +124,7 @@ public class GeometryIndexerTests extends ESTestCase { indexed = new MultiLine(Arrays.asList( line, new Line(new double[]{160, 180}, new double[]{10, 15}), - new Line(new double[]{180, -160}, new double[]{15, 20})) + new Line(new double[]{-180, -160}, new double[]{15, 20})) ); assertEquals(indexed, indexer.prepareForIndexing(multiLine)); diff --git a/server/src/test/java/org/elasticsearch/common/path/PathTrieTests.java b/server/src/test/java/org/elasticsearch/common/path/PathTrieTests.java index 555f052872b..42886070958 100644 --- a/server/src/test/java/org/elasticsearch/common/path/PathTrieTests.java +++ b/server/src/test/java/org/elasticsearch/common/path/PathTrieTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.common.path; +import org.elasticsearch.common.path.PathTrie.TrieMatchingMode; import org.elasticsearch.rest.RestUtils; import org.elasticsearch.test.ESTestCase; @@ -29,8 +30,6 @@ import java.util.Map; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.nullValue; -import org.elasticsearch.common.path.PathTrie.TrieMatchingMode; - public class PathTrieTests extends ESTestCase { public static final PathTrie.Decoder NO_DECODER = new PathTrie.Decoder() { @@ -122,13 +121,13 @@ public class PathTrieTests extends ESTestCase { PathTrie trie = new PathTrie<>(NO_DECODER); trie.insert("{testA}", "test1"); trie.insert("{testA}/{testB}", "test2"); - trie.insert("a/{testA}", "test3"); + trie.insert("a/{testB}", "test3"); trie.insert("{testA}/b", "test4"); trie.insert("{testA}/b/c", "test5"); trie.insert("a/{testB}/c", "test6"); 
trie.insert("a/b/{testC}", "test7"); trie.insert("{testA}/b/{testB}", "test8"); - trie.insert("x/{testA}/z", "test9"); + trie.insert("x/{testB}/z", "test9"); trie.insert("{testA}/{testB}/{testC}", "test10"); Map params = new HashMap<>(); @@ -196,7 +195,7 @@ public class PathTrieTests extends ESTestCase { trie.insert("a", "test2"); trie.insert("{testA}/{testB}", "test3"); trie.insert("a/{testB}", "test4"); - trie.insert("{testB}/b", "test5"); + trie.insert("{testA}/b", "test5"); trie.insert("a/b", "test6"); trie.insert("{testA}/b/{testB}", "test7"); trie.insert("x/{testA}/z", "test8"); diff --git a/server/src/test/java/org/elasticsearch/common/util/CuckooFilterTests.java b/server/src/test/java/org/elasticsearch/common/util/CuckooFilterTests.java index 47e9081d815..61d2dd62c9a 100644 --- a/server/src/test/java/org/elasticsearch/common/util/CuckooFilterTests.java +++ b/server/src/test/java/org/elasticsearch/common/util/CuckooFilterTests.java @@ -30,8 +30,8 @@ public class CuckooFilterTests extends AbstractWireSerializingTestCase Collections.emptyList(), - ESAllocationTestCase.createAllocationService(), - Collections.emptyList(), mock(GatewayMetaState.class), (s, p, r) -> {}); + ESAllocationTestCase.createAllocationService(), Collections.emptyList(), (s, p, r) -> {}); zenDiscovery.start(); return zenDiscovery; } diff --git a/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java b/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java index 40f9c73592d..5d6777613f9 100644 --- a/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java +++ b/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.env; import org.apache.lucene.index.SegmentInfos; -import org.apache.lucene.util.Constants; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.core.internal.io.IOUtils; import org.apache.lucene.util.LuceneTestCase; @@ -390,7 +389,6 @@ public class NodeEnvironmentTests extends ESTestCase { } public void testCustomDataPaths() throws Exception { - assumeFalse("Fails on Windows, see https://github.com/elastic/elasticsearch/issues/45333", Constants.WINDOWS); String[] dataPaths = tmpPaths(); NodeEnvironment env = newNodeEnvironment(dataPaths, "/tmp", Settings.EMPTY); @@ -406,7 +404,8 @@ public class NodeEnvironmentTests extends ESTestCase { assertTrue("settings with path_data should have a custom data path", s2.hasCustomDataPath()); assertThat(env.availableShardPaths(sid), equalTo(env.availableShardPaths(sid))); - assertThat(env.resolveCustomLocation(s2, sid), equalTo(PathUtils.get("/tmp/foo/0/" + index.getUUID() + "/0"))); + assertThat(env.resolveCustomLocation(s2, sid).toAbsolutePath(), + equalTo(PathUtils.get("/tmp/foo/0/" + index.getUUID() + "/0").toAbsolutePath())); assertThat("shard paths with a custom data_path should contain only regular paths", env.availableShardPaths(sid), @@ -418,7 +417,8 @@ public class NodeEnvironmentTests extends ESTestCase { IndexSettings s3 = new IndexSettings(s2.getIndexMetaData(), Settings.builder().build()); assertThat(env.availableShardPaths(sid), equalTo(env.availableShardPaths(sid))); - assertThat(env.resolveCustomLocation(s3, sid), equalTo(PathUtils.get("/tmp/foo/0/" + index.getUUID() + "/0"))); + assertThat(env.resolveCustomLocation(s3, sid).toAbsolutePath(), + equalTo(PathUtils.get("/tmp/foo/0/" + index.getUUID() + "/0").toAbsolutePath())); assertThat("shard paths with a custom data_path should contain only regular paths", env.availableShardPaths(sid), 
diff --git a/server/src/test/java/org/elasticsearch/gateway/GatewayMetaStatePersistedStateTests.java b/server/src/test/java/org/elasticsearch/gateway/GatewayMetaStatePersistedStateTests.java index 13348cef75f..107cc7541fe 100644 --- a/server/src/test/java/org/elasticsearch/gateway/GatewayMetaStatePersistedStateTests.java +++ b/server/src/test/java/org/elasticsearch/gateway/GatewayMetaStatePersistedStateTests.java @@ -63,9 +63,9 @@ public class GatewayMetaStatePersistedStateTests extends ESTestCase { super.tearDown(); } - private MockGatewayMetaState newGateway() throws IOException { - MockGatewayMetaState gateway = new MockGatewayMetaState(settings, nodeEnvironment, xContentRegistry(), localNode); - gateway.applyClusterStateUpdaters(); + private MockGatewayMetaState newGateway() { + final MockGatewayMetaState gateway = new MockGatewayMetaState(settings, nodeEnvironment, xContentRegistry(), localNode); + gateway.start(); return gateway; } diff --git a/server/src/test/java/org/elasticsearch/index/RequiredPipelineIT.java b/server/src/test/java/org/elasticsearch/index/RequiredPipelineIT.java new file mode 100644 index 00000000000..2897275ec36 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/RequiredPipelineIT.java @@ -0,0 +1,132 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index; + +import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESIntegTestCase; + +import java.io.IOException; +import java.util.Collections; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.hasToString; + +public class RequiredPipelineIT extends ESIntegTestCase { + + public void testRequiredPipeline() { + final Settings settings = Settings.builder().put(IndexSettings.REQUIRED_PIPELINE.getKey(), "required_pipeline").build(); + createIndex("index", settings); + + // this asserts that the required_pipeline was used, without us having to actually create the pipeline etc. 
+ final IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> client().prepareIndex("index", "_doc", "1").setSource(Collections.singletonMap("field", "value")).get()); + assertThat(e, hasToString(containsString("pipeline with id [required_pipeline] does not exist"))); + } + + public void testDefaultAndRequiredPipeline() { + final Settings settings = Settings.builder() + .put(IndexSettings.DEFAULT_PIPELINE.getKey(), "default_pipeline") + .put(IndexSettings.REQUIRED_PIPELINE.getKey(), "required_pipeline") + .build(); + final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> createIndex("index", settings)); + assertThat( + e, + hasToString(containsString("index has a default pipeline [default_pipeline] and a required pipeline [required_pipeline]"))); + } + + public void testDefaultAndRequiredPipelineFromTemplates() { + final int lowOrder = randomIntBetween(0, Integer.MAX_VALUE - 1); + final int highOrder = randomIntBetween(lowOrder + 1, Integer.MAX_VALUE); + final int requiredPipelineOrder; + final int defaultPipelineOrder; + if (randomBoolean()) { + defaultPipelineOrder = lowOrder; + requiredPipelineOrder = highOrder; + } else { + defaultPipelineOrder = highOrder; + requiredPipelineOrder = lowOrder; + } + final Settings defaultPipelineSettings = + Settings.builder().put(IndexSettings.DEFAULT_PIPELINE.getKey(), "default_pipeline").build(); + admin().indices() + .preparePutTemplate("default") + .setPatterns(Collections.singletonList("index*")) + .setOrder(defaultPipelineOrder) + .setSettings(defaultPipelineSettings) + .get(); + final Settings requiredPipelineSettings = + Settings.builder().put(IndexSettings.REQUIRED_PIPELINE.getKey(), "required_pipeline").build(); + admin().indices() + .preparePutTemplate("required") + .setPatterns(Collections.singletonList("index*")) + .setOrder(requiredPipelineOrder) + .setSettings(requiredPipelineSettings) + .get(); + final IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> client().prepareIndex("index", "_doc", "1").setSource(Collections.singletonMap("field", "value")).get()); + assertThat( + e, + hasToString(containsString( + "required pipeline [required_pipeline] and default pipeline [default_pipeline] can not both be set"))); + } + + public void testHighOrderRequiredPipelinePreferred() throws IOException { + final int lowOrder = randomIntBetween(0, Integer.MAX_VALUE - 1); + final int highOrder = randomIntBetween(lowOrder + 1, Integer.MAX_VALUE); + final Settings defaultPipelineSettings = + Settings.builder().put(IndexSettings.REQUIRED_PIPELINE.getKey(), "low_order_required_pipeline").build(); + admin().indices() + .preparePutTemplate("default") + .setPatterns(Collections.singletonList("index*")) + .setOrder(lowOrder) + .setSettings(defaultPipelineSettings) + .get(); + final Settings requiredPipelineSettings = + Settings.builder().put(IndexSettings.REQUIRED_PIPELINE.getKey(), "high_order_required_pipeline").build(); + admin().indices() + .preparePutTemplate("required") + .setPatterns(Collections.singletonList("index*")) + .setOrder(highOrder) + .setSettings(requiredPipelineSettings) + .get(); + + // this asserts that the high_order_required_pipeline was selected, without us having to actually create the pipeline etc. 
+ final IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> client().prepareIndex("index", "_doc", "1").setSource(Collections.singletonMap("field", "value")).get()); + assertThat(e, hasToString(containsString("pipeline with id [high_order_required_pipeline] does not exist"))); + } + + public void testRequiredPipelineAndRequestPipeline() { + final Settings settings = Settings.builder().put(IndexSettings.REQUIRED_PIPELINE.getKey(), "required_pipeline").build(); + createIndex("index", settings); + final IndexRequestBuilder builder = client().prepareIndex("index", "_doc", "1"); + builder.setSource(Collections.singletonMap("field", "value")); + builder.setPipeline("request_pipeline"); + final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, builder::get); + assertThat( + e, + hasToString(containsString("request pipeline [request_pipeline] can not override required pipeline [required_pipeline]"))); + } + +} diff --git a/server/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataTestCase.java b/server/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataTestCase.java index cb3af392d0b..3b4bf428adf 100644 --- a/server/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataTestCase.java +++ b/server/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataTestCase.java @@ -163,7 +163,7 @@ public abstract class AbstractFieldDataTestCase extends ESSingleNodeTestCase { protected Nested createNested(IndexSearcher searcher, Query parentFilter, Query childFilter) throws IOException { BitsetFilterCache s = indexService.cache().bitsetFilterCache(); - return new Nested(s.getBitSetProducer(parentFilter), childFilter, null, IndexSearcher::new); + return new Nested(s.getBitSetProducer(parentFilter), childFilter, null, searcher); } public void testEmpty() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DateFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DateFieldTypeTests.java index fa3c9c3e4bd..479f4d7fc55 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DateFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DateFieldTypeTests.java @@ -38,6 +38,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.common.time.DateFormatters; import org.elasticsearch.common.time.DateMathParser; +import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.fielddata.AtomicNumericFieldData; @@ -176,9 +177,9 @@ public class DateFieldTypeTests extends FieldTypeTestCase { Settings indexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1).build(); QueryShardContext context = new QueryShardContext(0, - new IndexSettings(IndexMetaData.builder("foo").settings(indexSettings).build(), - indexSettings), - null, null, null, null, null, null, xContentRegistry(), writableRegistry(), null, null, () -> nowInMillis, null); + new IndexSettings(IndexMetaData.builder("foo").settings(indexSettings).build(), indexSettings), + BigArrays.NON_RECYCLING_INSTANCE, null, null, null, null, null, + xContentRegistry(), writableRegistry(), null, null, () -> nowInMillis, null); MappedFieldType ft = createDefaultFieldType(); 
ft.setName("field"); String date = "2015-10-12T14:10:55"; @@ -200,7 +201,8 @@ public class DateFieldTypeTests extends FieldTypeTestCase { .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1).build(); QueryShardContext context = new QueryShardContext(0, new IndexSettings(IndexMetaData.builder("foo").settings(indexSettings).build(), indexSettings), - null, null, null, null, null, null, xContentRegistry(), writableRegistry(), null, null, () -> nowInMillis, null); + BigArrays.NON_RECYCLING_INSTANCE, null, null, null, null, null, xContentRegistry(), writableRegistry(), + null, null, () -> nowInMillis, null); MappedFieldType ft = createDefaultFieldType(); ft.setName("field"); String date1 = "2015-10-12T14:10:55"; diff --git a/server/src/test/java/org/elasticsearch/index/mapper/FieldNamesFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/FieldNamesFieldMapperTests.java index d0d4a6b1d15..a1ad67a0550 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/FieldNamesFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/FieldNamesFieldMapperTests.java @@ -43,7 +43,7 @@ public class FieldNamesFieldMapperTests extends ESSingleNodeTestCase { return set; } - private static SortedSet set(T... values) { + private static SortedSet set(String... values) { return new TreeSet<>(Arrays.asList(values)); } @@ -114,6 +114,7 @@ public class FieldNamesFieldMapperTests extends ESSingleNodeTestCase { XContentType.JSON)); assertFieldNames(set("field"), doc); + assertWarnings(FieldNamesFieldMapper.TypeParser.ENABLED_DEPRECATION_MESSAGE.replace("{}", "test")); } public void testDisabled() throws Exception { @@ -133,6 +134,7 @@ public class FieldNamesFieldMapperTests extends ESSingleNodeTestCase { XContentType.JSON)); assertNull(doc.rootDoc().get("_field_names")); + assertWarnings(FieldNamesFieldMapper.TypeParser.ENABLED_DEPRECATION_MESSAGE.replace("{}", "test")); } public void testMergingMappings() throws Exception { @@ -152,5 +154,6 @@ public class FieldNamesFieldMapperTests extends ESSingleNodeTestCase { mapperEnabled = mapperService.merge("type", new CompressedXContent(enabledMapping), MapperService.MergeReason.MAPPING_UPDATE); assertTrue(mapperEnabled.metadataMapper(FieldNamesFieldMapper.class).fieldType().isEnabled()); + assertWarnings(FieldNamesFieldMapper.TypeParser.ENABLED_DEPRECATION_MESSAGE.replace("{}", "test")); } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/FieldNamesFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/FieldNamesFieldTypeTests.java index d01c8efe6c7..9bbeecdfc8f 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/FieldNamesFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/FieldNamesFieldTypeTests.java @@ -24,6 +24,7 @@ import org.apache.lucene.search.TermQuery; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.query.QueryShardContext; import org.junit.Before; @@ -66,7 +67,8 @@ public class FieldNamesFieldTypeTests extends FieldTypeTestCase { when(mapperService.simpleMatchToFullName("field_name")).thenReturn(Collections.singleton("field_name")); QueryShardContext queryShardContext = new QueryShardContext(0, - indexSettings, null, null, null, mapperService, null, null, null, null, null, null, 
() -> 0L, null); + indexSettings, BigArrays.NON_RECYCLING_INSTANCE, null, null, mapperService, + null, null, null, null, null, null, () -> 0L, null); fieldNamesFieldType.setEnabled(true); Query termQuery = fieldNamesFieldType.termQuery("field_name", queryShardContext); assertEquals(new TermQuery(new Term(FieldNamesFieldMapper.CONTENT_TYPE, "field_name")), termQuery); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldTypeTests.java index 40f24bebba0..16fe2ceee8f 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldTypeTests.java @@ -38,6 +38,7 @@ import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.common.network.InetAddresses; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.time.DateFormatter; +import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.mapper.RangeFieldMapper.RangeFieldType; import org.elasticsearch.index.query.QueryShardContext; @@ -227,8 +228,8 @@ public class RangeFieldTypeTests extends FieldTypeTestCase { Settings indexSettings = Settings.builder() .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build(); IndexSettings idxSettings = IndexSettingsModule.newIndexSettings(randomAlphaOfLengthBetween(1, 10), indexSettings); - return new QueryShardContext(0, idxSettings, null, null, null, null, null, null, xContentRegistry(), - writableRegistry(), null, null, () -> nowInMillis, null); + return new QueryShardContext(0, idxSettings, BigArrays.NON_RECYCLING_INSTANCE, null, null, null, null, null, + xContentRegistry(), writableRegistry(), null, null, () -> nowInMillis, null); } public void testDateRangeQueryUsingMappingFormat() { diff --git a/server/src/test/java/org/elasticsearch/index/query/IntervalQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/IntervalQueryBuilderTests.java index 01a11450392..15f9b52d23b 100644 --- a/server/src/test/java/org/elasticsearch/index/query/IntervalQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/IntervalQueryBuilderTests.java @@ -29,6 +29,7 @@ import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.script.Script; @@ -370,9 +371,8 @@ public class IntervalQueryBuilderTests extends AbstractQueryTestCase innerHitInternals = new HashMap<>(); InnerHitContextBuilder.extractInnerHits(queryBuilder, innerHitInternals); + InnerHitsContext innerHitsContext = new InnerHitsContext(); for (InnerHitContextBuilder builder : innerHitInternals.values()) { - builder.build(searchContext, searchContext.innerHits()); + builder.build(searchContext, innerHitsContext); } - assertNotNull(searchContext.innerHits()); - assertEquals(1, searchContext.innerHits().getInnerHits().size()); - assertTrue(searchContext.innerHits().getInnerHits().containsKey(queryBuilder.innerHit().getName())); - InnerHitsContext.InnerHitSubContext innerHits = searchContext.innerHits().getInnerHits().get(queryBuilder.innerHit().getName()); + assertEquals(1, 
innerHitsContext.getInnerHits().size()); + assertTrue(innerHitsContext.getInnerHits().containsKey(queryBuilder.innerHit().getName())); + InnerHitsContext.InnerHitSubContext innerHits = innerHitsContext.getInnerHits().get(queryBuilder.innerHit().getName()); assertEquals(innerHits.size(), queryBuilder.innerHit().getSize()); assertEquals(innerHits.sort().sort.getSort().length, 1); assertEquals(innerHits.sort().sort.getSort()[0].getField(), INT_FIELD_NAME); @@ -328,7 +328,7 @@ public class NestedQueryBuilderTests extends AbstractQueryTestCase - mappedFieldType.fielddataBuilder(idxName).build(indexSettings, mappedFieldType, null, null, null) - , mapperService, null, null, NamedXContentRegistry.EMPTY, new NamedWriteableRegistry(Collections.emptyList()), null, null, - () -> nowInMillis, clusterAlias); + 0, indexSettings, BigArrays.NON_RECYCLING_INSTANCE, null, + (mappedFieldType, idxName) -> + mappedFieldType.fielddataBuilder(idxName).build(indexSettings, mappedFieldType, null, null, null), + mapperService, null, null, NamedXContentRegistry.EMPTY, new NamedWriteableRegistry(Collections.emptyList()), + null, null, () -> nowInMillis, clusterAlias); } } diff --git a/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java index ea36a90e59b..39e036d7289 100644 --- a/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java @@ -59,6 +59,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.Fuzziness; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.index.mapper.FieldNamesFieldMapper; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.search.QueryStringQueryParser; import org.elasticsearch.search.internal.SearchContext; @@ -1058,11 +1059,14 @@ public class QueryStringQueryBuilderTests extends AbstractQueryTestCase DISJOINT assertEquals(Relation.DISJOINT, range.getRelation(context)); diff --git a/server/src/test/java/org/elasticsearch/index/query/SpanMultiTermQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/SpanMultiTermQueryBuilderTests.java index 4c59e25804a..d64bf557a01 100644 --- a/server/src/test/java/org/elasticsearch/index/query/SpanMultiTermQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/SpanMultiTermQueryBuilderTests.java @@ -28,6 +28,7 @@ import org.apache.lucene.index.Term; import org.apache.lucene.queries.SpanMatchNoDocsQuery; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.BoostQuery; +import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MultiTermQuery; import org.apache.lucene.search.PrefixQuery; import org.apache.lucene.search.Query; @@ -274,7 +275,7 @@ public class SpanMultiTermQueryBuilderTests extends AbstractQueryTestCase query.rewrite(reader)); assertThat(exc.getMessage(), containsString("maxClauseCount")); } finally { diff --git a/server/src/test/java/org/elasticsearch/index/query/plugin/CustomQueryParserIT.java b/server/src/test/java/org/elasticsearch/index/query/plugin/CustomQueryParserIT.java index f1ebcd97174..a6e5f815cea 100644 --- a/server/src/test/java/org/elasticsearch/index/query/plugin/CustomQueryParserIT.java +++ 
b/server/src/test/java/org/elasticsearch/index/query/plugin/CustomQueryParserIT.java @@ -20,8 +20,6 @@ package org.elasticsearch.index.query.plugin; import org.elasticsearch.index.query.BoolQueryBuilder; -import org.elasticsearch.index.query.QueryShardContext; -import org.elasticsearch.indices.IndicesService; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.junit.Before; @@ -64,10 +62,4 @@ public class CustomQueryParserIT extends ESIntegTestCase { public void testCustomDummyQueryWithinBooleanQuery() { assertHitCount(client().prepareSearch("index").setQuery(new BoolQueryBuilder().must(new DummyQueryBuilder())).get(), 1L); } - - private static QueryShardContext queryShardContext() { - IndicesService indicesService = internalCluster().getDataNodeInstance(IndicesService.class); - return indicesService.indexServiceSafe(resolveIndex("index")).newQueryShardContext( - randomInt(20), null, () -> { throw new UnsupportedOperationException(); }, null); - } } diff --git a/server/src/test/java/org/elasticsearch/index/search/NestedHelperTests.java b/server/src/test/java/org/elasticsearch/index/search/NestedHelperTests.java index 40a8251fb24..73dae837285 100644 --- a/server/src/test/java/org/elasticsearch/index/search/NestedHelperTests.java +++ b/server/src/test/java/org/elasticsearch/index/search/NestedHelperTests.java @@ -23,6 +23,7 @@ import org.apache.lucene.index.MultiReader; import org.apache.lucene.index.Term; import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; @@ -327,7 +328,7 @@ public class NestedHelperTests extends ESSingleNodeTestCase { } public void testNested() throws IOException { - QueryShardContext context = indexService.newQueryShardContext(0, new MultiReader(), () -> 0, null); + QueryShardContext context = indexService.newQueryShardContext(0, new IndexSearcher(new MultiReader()), () -> 0, null); NestedQueryBuilder queryBuilder = new NestedQueryBuilder("nested1", new MatchAllQueryBuilder(), ScoreMode.Avg); ESToParentBlockJoinQuery query = (ESToParentBlockJoinQuery) queryBuilder.toQuery(context); diff --git a/server/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java b/server/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java index 4e7b03657d3..2480192cdf7 100644 --- a/server/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java +++ b/server/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java @@ -613,7 +613,7 @@ public class NestedSortingTests extends AbstractFieldDataTestCase { DirectoryReader reader = DirectoryReader.open(writer); reader = ElasticsearchDirectoryReader.wrap(reader, new ShardId(indexService.index(), 0)); IndexSearcher searcher = new IndexSearcher(reader); - QueryShardContext queryShardContext = indexService.newQueryShardContext(0, reader, () -> 0L, null); + QueryShardContext queryShardContext = indexService.newQueryShardContext(0, searcher, () -> 0L, null); FieldSortBuilder sortBuilder = new FieldSortBuilder("chapters.paragraphs.word_count"); sortBuilder.setNestedSort(new NestedSortBuilder("chapters").setNestedSort(new NestedSortBuilder("chapters.paragraphs"))); diff --git a/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerRetentionLeaseTests.java 
b/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerRetentionLeaseTests.java index bdf7acf478b..7120675c57d 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerRetentionLeaseTests.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerRetentionLeaseTests.java @@ -719,6 +719,36 @@ public class ReplicationTrackerRetentionLeaseTests extends ReplicationTrackerTes assertThat(replicationTracker.loadRetentionLeases(path), equalTo(replicationTracker.getRetentionLeases())); } + public void testRenewLeaseWithLowerRetainingSequenceNumber() throws Exception { + final AllocationId allocationId = AllocationId.newInitializing(); + long primaryTerm = randomLongBetween(1, Long.MAX_VALUE); + final ReplicationTracker replicationTracker = new ReplicationTracker( + new ShardId("test", "_na", 0), + allocationId.getId(), + IndexSettingsModule.newIndexSettings("test", Settings.EMPTY), + primaryTerm, + UNASSIGNED_SEQ_NO, + value -> {}, + () -> 0L, + (leases, listener) -> {}, + OPS_BASED_RECOVERY_ALWAYS_REASONABLE); + replicationTracker.updateFromMaster( + randomNonNegativeLong(), + Collections.singleton(allocationId.getId()), + routingTable(Collections.emptySet(), allocationId)); + replicationTracker.activatePrimaryMode(SequenceNumbers.NO_OPS_PERFORMED); + final String id = randomAlphaOfLength(8); + final long retainingSequenceNumber = randomNonNegativeLong(); + final String source = randomAlphaOfLength(8); + replicationTracker.addRetentionLease(id, retainingSequenceNumber, source, ActionListener.wrap(() -> {})); + final long lowerRetainingSequenceNumber = randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, retainingSequenceNumber - 1); + final RetentionLeaseInvalidRetainingSeqNoException e = expectThrows(RetentionLeaseInvalidRetainingSeqNoException.class, + () -> replicationTracker.renewRetentionLease(id, lowerRetainingSequenceNumber, source)); + assertThat(e, hasToString(containsString("the current retention lease with [" + id + "]" + + " is retaining a higher sequence number [" + retainingSequenceNumber + "]" + + " than the new retaining sequence number [" + lowerRetainingSequenceNumber + "] from [" + source + "]"))); + } + private void assertRetentionLeases( final ReplicationTracker replicationTracker, final int size, diff --git a/server/src/test/java/org/elasticsearch/indices/flush/FlushIT.java b/server/src/test/java/org/elasticsearch/indices/flush/FlushIT.java index 3710988772a..5b40fbcc55f 100644 --- a/server/src/test/java/org/elasticsearch/indices/flush/FlushIT.java +++ b/server/src/test/java/org/elasticsearch/indices/flush/FlushIT.java @@ -309,7 +309,7 @@ public class FlushIT extends ESIntegTestCase { assertThat(partialResult.totalShards(), equalTo(numberOfReplicas + 1)); assertThat(partialResult.successfulShards(), equalTo(numberOfReplicas)); assertThat(partialResult.shardResponses().get(outOfSyncReplica.routingEntry()).failureReason, equalTo( - "out of sync replica; num docs on replica [" + (numDocs + extraDocs) + "]; num docs on primary [" + numDocs + "]")); + "ongoing indexing operations: num docs on replica [" + (numDocs + extraDocs) + "]; num docs on primary [" + numDocs + "]")); // Index extra documents to all shards - synced-flush should be ok. 
for (IndexShard indexShard : indexShards) { // Do reindex documents to the out of sync replica to avoid trigger merges diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/IndexPrimaryRelocationIT.java b/server/src/test/java/org/elasticsearch/indices/recovery/IndexPrimaryRelocationIT.java index b020c3d6b88..b71acc9dfe6 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/IndexPrimaryRelocationIT.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/IndexPrimaryRelocationIT.java @@ -21,6 +21,7 @@ package org.elasticsearch.indices.recovery; import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; +import org.elasticsearch.action.admin.cluster.node.hotthreads.NodeHotThreads; import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.cluster.ClusterState; @@ -28,14 +29,15 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand; import org.elasticsearch.common.Priority; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.Collectors; -import static org.hamcrest.Matchers.equalTo; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST) public class IndexPrimaryRelocationIT extends ESIntegTestCase { @@ -54,7 +56,7 @@ public class IndexPrimaryRelocationIT extends ESIntegTestCase { Thread indexingThread = new Thread() { @Override public void run() { - while (finished.get() == false) { + while (finished.get() == false && numAutoGenDocs.get() < 10_000) { IndexResponse indexResponse = client().prepareIndex("test", "type", "id").setSource("field", "value").get(); assertEquals(DocWriteResponse.Result.CREATED, indexResponse.getResult()); DeleteResponse deleteResponse = client().prepareDelete("test", "type", "id").get(); @@ -80,8 +82,18 @@ public class IndexPrimaryRelocationIT extends ESIntegTestCase { .add(new MoveAllocationCommand("test", 0, relocationSource.getId(), relocationTarget.getId())) .execute().actionGet(); ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth() + .setTimeout(TimeValue.timeValueSeconds(60)) .setWaitForEvents(Priority.LANGUID).setWaitForNoRelocatingShards(true).execute().actionGet(); - assertThat(clusterHealthResponse.isTimedOut(), equalTo(false)); + if (clusterHealthResponse.isTimedOut()) { + final String hotThreads = client().admin().cluster().prepareNodesHotThreads().setIgnoreIdleThreads(false).get().getNodes() + .stream().map(NodeHotThreads::getHotThreads).collect(Collectors.joining("\n")); + final ClusterState clusterState = client().admin().cluster().prepareState().get().getState(); + logger.info("timed out for waiting for relocation iteration [{}] \ncluster state {} \nhot threads {}", + i, clusterState, hotThreads); + finished.set(true); + indexingThread.join(); + throw new AssertionError("timed out waiting for relocation iteration [" + i + "] "); + } logger.info("--> [iteration {}] relocation complete", i); relocationSource = relocationTarget; // indexing process aborted early, no need for more relocations as test has already failed 
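The IndexPrimaryRelocationIT change above bounds the background indexing loop at 10,000 documents and, when the relocation health check times out, dumps hot threads and cluster state, flips the finished flag, and joins the indexing thread before throwing. A minimal standalone sketch of that cooperative-shutdown pattern, using only JDK concurrency primitives and illustrative names rather than the test's actual helpers:

    import java.util.concurrent.atomic.AtomicBoolean;
    import java.util.concurrent.atomic.AtomicInteger;

    public class BoundedWorkerSketch {
        public static void main(String[] args) throws InterruptedException {
            AtomicBoolean finished = new AtomicBoolean();
            AtomicInteger produced = new AtomicInteger();

            // The worker stops either when asked to via the flag or after a hard cap,
            // so it cannot spin forever if the waiting side hangs or fails.
            Thread worker = new Thread(() -> {
                while (finished.get() == false && produced.get() < 10_000) {
                    produced.incrementAndGet();
                }
            });
            worker.start();

            // Timeout/failure path: signal the worker, wait for it to exit, and only
            // then report or throw, so diagnostics are not interleaved with new work.
            Thread.sleep(10);
            finished.set(true);
            worker.join();
            System.out.println("worker stopped after " + produced.get() + " iterations");
        }
    }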
diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java b/server/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java index ae7d6f07183..e7277450e6b 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java @@ -863,7 +863,8 @@ public class IndexRecoveryIT extends ESIntegTestCase { final String indexName = "test"; client().admin().indices().prepareCreate(indexName).setSettings(Settings.builder() .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 2)).get(); + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 2) + .put(IndexSettings.FILE_BASED_RECOVERY_THRESHOLD_SETTING.getKey(), 1.0)).get(); ensureGreen(indexName); // Perform some replicated operations so the replica isn't simply empty, because ops-based recovery isn't better in that case diff --git a/server/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java b/server/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java index eb0e564e92d..c4fb87dfc9e 100644 --- a/server/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java +++ b/server/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java @@ -62,6 +62,7 @@ import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; import org.elasticsearch.test.InternalSettingsPlugin; +import org.elasticsearch.test.junit.annotations.TestLogging; import java.io.IOException; import java.util.ArrayList; @@ -1007,6 +1008,7 @@ public class IndexStatsIT extends ESIntegTestCase { assertEquals(total, shardTotal); } + @TestLogging(value = "_root:DEBUG", reason = "https://github.com/elastic/elasticsearch/issues/46701") public void testFilterCacheStats() throws Exception { Settings settings = Settings.builder().put(indexSettings()) .put("number_of_replicas", 0) @@ -1064,11 +1066,14 @@ public class IndexStatsIT extends ESIntegTestCase { }); flush("index"); } + logger.info("--> force merging to a single segment"); ForceMergeResponse forceMergeResponse = client().admin().indices().prepareForceMerge("index").setFlush(true).setMaxNumSegments(1).get(); assertAllSuccessful(forceMergeResponse); + logger.info("--> refreshing"); refresh(); + logger.info("--> verifying that cache size is 0"); response = client().admin().indices().prepareStats("index").setQueryCache(true).get(); assertCumulativeQueryCacheStats(response); assertThat(response.getTotal().queryCache.getHitCount(), greaterThan(0L)); diff --git a/server/src/test/java/org/elasticsearch/ingest/CompoundProcessorTests.java b/server/src/test/java/org/elasticsearch/ingest/CompoundProcessorTests.java index e44f1fb4700..b3b8ee9762d 100644 --- a/server/src/test/java/org/elasticsearch/ingest/CompoundProcessorTests.java +++ b/server/src/test/java/org/elasticsearch/ingest/CompoundProcessorTests.java @@ -28,6 +28,7 @@ import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.concurrent.TimeUnit; +import java.util.function.Consumer; import java.util.function.LongSupplier; import static org.hamcrest.CoreMatchers.equalTo; @@ -75,7 +76,7 @@ public class CompoundProcessorTests extends ESTestCase { } public void testSingleProcessorWithException() throws Exception { - TestProcessor processor = new TestProcessor(ingestDocument -> {throw new RuntimeException("error");}); + TestProcessor processor = new 
TestProcessor(new RuntimeException("error")); LongSupplier relativeTimeProvider = mock(LongSupplier.class); when(relativeTimeProvider.getAsLong()).thenReturn(0L); CompoundProcessor compoundProcessor = new CompoundProcessor(relativeTimeProvider, processor); @@ -91,7 +92,7 @@ public class CompoundProcessorTests extends ESTestCase { } public void testIgnoreFailure() throws Exception { - TestProcessor processor1 = new TestProcessor(ingestDocument -> {throw new RuntimeException("error");}); + TestProcessor processor1 = new TestProcessor(new RuntimeException("error")); TestProcessor processor2 = new TestProcessor(ingestDocument -> {ingestDocument.setFieldValue("field", "value");}); LongSupplier relativeTimeProvider = mock(LongSupplier.class); when(relativeTimeProvider.getAsLong()).thenReturn(0L); @@ -106,7 +107,7 @@ public class CompoundProcessorTests extends ESTestCase { } public void testSingleProcessorWithOnFailureProcessor() throws Exception { - TestProcessor processor1 = new TestProcessor("id", "first", ingestDocument -> {throw new RuntimeException("error");}); + TestProcessor processor1 = new TestProcessor("id", "first", new RuntimeException("error")); TestProcessor processor2 = new TestProcessor(ingestDocument -> { Map ingestMetadata = ingestDocument.getIngestMetadata(); assertThat(ingestMetadata.size(), equalTo(3)); @@ -128,7 +129,7 @@ public class CompoundProcessorTests extends ESTestCase { } public void testSingleProcessorWithOnFailureDropProcessor() throws Exception { - TestProcessor processor1 = new TestProcessor("id", "first", ingestDocument -> {throw new RuntimeException("error");}); + TestProcessor processor1 = new TestProcessor("id", "first", new RuntimeException("error")); Processor processor2 = new Processor() { @Override public IngestDocument execute(IngestDocument ingestDocument) throws Exception { @@ -159,8 +160,8 @@ public class CompoundProcessorTests extends ESTestCase { } public void testSingleProcessorWithNestedFailures() throws Exception { - TestProcessor processor = new TestProcessor("id", "first", ingestDocument -> {throw new RuntimeException("error");}); - TestProcessor processorToFail = new TestProcessor("id2", "second", ingestDocument -> { + TestProcessor processor = new TestProcessor("id", "first", new RuntimeException("error")); + TestProcessor processorToFail = new TestProcessor("id2", "second", (Consumer) ingestDocument -> { Map ingestMetadata = ingestDocument.getIngestMetadata(); assertThat(ingestMetadata.size(), equalTo(3)); assertThat(ingestMetadata.get(CompoundProcessor.ON_FAILURE_MESSAGE_FIELD), equalTo("error")); @@ -189,7 +190,7 @@ public class CompoundProcessorTests extends ESTestCase { } public void testCompoundProcessorExceptionFailWithoutOnFailure() throws Exception { - TestProcessor firstProcessor = new TestProcessor("id1", "first", ingestDocument -> {throw new RuntimeException("error");}); + TestProcessor firstProcessor = new TestProcessor("id1", "first", new RuntimeException("error")); TestProcessor secondProcessor = new TestProcessor("id3", "second", ingestDocument -> { Map ingestMetadata = ingestDocument.getIngestMetadata(); assertThat(ingestMetadata.entrySet(), hasSize(3)); @@ -212,9 +213,9 @@ public class CompoundProcessorTests extends ESTestCase { } public void testCompoundProcessorExceptionFail() throws Exception { - TestProcessor firstProcessor = new TestProcessor("id1", "first", ingestDocument -> {throw new RuntimeException("error");}); + TestProcessor firstProcessor = new TestProcessor("id1", "first", new RuntimeException("error")); 
TestProcessor failProcessor = - new TestProcessor("tag_fail", "fail", ingestDocument -> {throw new RuntimeException("custom error message");}); + new TestProcessor("tag_fail", "fail", new RuntimeException("custom error message")); TestProcessor secondProcessor = new TestProcessor("id3", "second", ingestDocument -> { Map ingestMetadata = ingestDocument.getIngestMetadata(); assertThat(ingestMetadata.entrySet(), hasSize(3)); @@ -238,9 +239,9 @@ public class CompoundProcessorTests extends ESTestCase { } public void testCompoundProcessorExceptionFailInOnFailure() throws Exception { - TestProcessor firstProcessor = new TestProcessor("id1", "first", ingestDocument -> {throw new RuntimeException("error");}); + TestProcessor firstProcessor = new TestProcessor("id1", "first", new RuntimeException("error")); TestProcessor failProcessor = - new TestProcessor("tag_fail", "fail", ingestDocument -> {throw new RuntimeException("custom error message");}); + new TestProcessor("tag_fail", "fail", new RuntimeException("custom error message")); TestProcessor secondProcessor = new TestProcessor("id3", "second", ingestDocument -> { Map ingestMetadata = ingestDocument.getIngestMetadata(); assertThat(ingestMetadata.entrySet(), hasSize(3)); @@ -264,8 +265,8 @@ public class CompoundProcessorTests extends ESTestCase { } public void testBreakOnFailure() throws Exception { - TestProcessor firstProcessor = new TestProcessor("id1", "first", ingestDocument -> {throw new RuntimeException("error1");}); - TestProcessor secondProcessor = new TestProcessor("id2", "second", ingestDocument -> {throw new RuntimeException("error2");}); + TestProcessor firstProcessor = new TestProcessor("id1", "first", new RuntimeException("error1")); + TestProcessor secondProcessor = new TestProcessor("id2", "second", new RuntimeException("error2")); TestProcessor onFailureProcessor = new TestProcessor("id2", "on_failure", ingestDocument -> {}); LongSupplier relativeTimeProvider = mock(LongSupplier.class); when(relativeTimeProvider.getAsLong()).thenReturn(0L); diff --git a/server/src/test/java/org/elasticsearch/ingest/IngestClientIT.java b/server/src/test/java/org/elasticsearch/ingest/IngestClientIT.java index 6e5d862372a..2e3a23cd3be 100644 --- a/server/src/test/java/org/elasticsearch/ingest/IngestClientIT.java +++ b/server/src/test/java/org/elasticsearch/ingest/IngestClientIT.java @@ -272,4 +272,25 @@ public class IngestClientIT extends ESIntegTestCase { GetPipelineResponse response = client().admin().cluster().prepareGetPipeline("_id2").get(); assertFalse(response.isFound()); } + + public void testWithDedicatedMaster() throws Exception { + String masterOnlyNode = internalCluster().startMasterOnlyNode(); + BytesReference source = BytesReference.bytes(jsonBuilder().startObject() + .field("description", "my_pipeline") + .startArray("processors") + .startObject() + .startObject("test") + .endObject() + .endObject() + .endArray() + .endObject()); + PutPipelineRequest putPipelineRequest = new PutPipelineRequest("_id", source, XContentType.JSON); + client().admin().cluster().putPipeline(putPipelineRequest).get(); + + BulkItemResponse item = client(masterOnlyNode).prepareBulk().add( + client().prepareIndex("test", "type").setSource("field", "value2", "drop", true).setPipeline("_id")).get() + .getItems()[0]; + assertFalse(item.isFailed()); + assertEquals("auto-generated", item.getResponse().getId()); + } } diff --git a/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java 
b/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java index 2f34610e6a9..f82ca25241f 100644 --- a/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java +++ b/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java @@ -70,6 +70,7 @@ import java.util.List; import java.util.Map; import java.util.Objects; import java.util.concurrent.ExecutorService; +import java.util.concurrent.atomic.AtomicInteger; import java.util.function.BiConsumer; import java.util.function.IntConsumer; import java.util.function.LongSupplier; @@ -1176,6 +1177,44 @@ public class IngestServiceTests extends ESTestCase { verify(dropHandler, times(1)).accept(0); } + public void testIngestClusterStateListeners_orderOfExecution() { + final AtomicInteger counter = new AtomicInteger(0); + + // Ingest cluster state listener state should be invoked first: + Consumer ingestClusterStateListener = clusterState -> { + assertThat(counter.compareAndSet(0, 1), is(true)); + }; + + // Processor factory should be invoked secondly after ingest cluster state listener: + IngestPlugin testPlugin = new IngestPlugin() { + @Override + public Map getProcessors(Processor.Parameters parameters) { + return Collections.singletonMap("test", (factories, tag, config) -> { + assertThat(counter.compareAndSet(1, 2), is(true)); + return new FakeProcessor("test", tag, ingestDocument -> {}); + }); + } + }; + + // Create ingest service: + ThreadPool tp = mock(ThreadPool.class); + Client client = mock(Client.class); + IngestService ingestService = + new IngestService(mock(ClusterService.class), tp, null, null, null, Arrays.asList(testPlugin), client); + ingestService.addIngestClusterStateListener(ingestClusterStateListener); + + // Create pipeline and apply the resulting cluster state, which should update the counter in the right order: + PutPipelineRequest putRequest = new PutPipelineRequest("_id", + new BytesArray("{\"processors\": [{\"test\" : {}}]}"), XContentType.JSON); + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); // Start empty + ClusterState previousClusterState = clusterState; + clusterState = IngestService.innerPut(putRequest, clusterState); + ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); + + // Sanity check that counter has been updated twice: + assertThat(counter.get(), equalTo(2)); + } + private IngestDocument eqIndexTypeId(final Map source) { return argThat(new IngestDocumentMatcher("_index", "_type", "_id", -3L, VersionType.INTERNAL, source)); } diff --git a/server/src/test/java/org/elasticsearch/ingest/TrackingResultProcessorTests.java b/server/src/test/java/org/elasticsearch/ingest/TrackingResultProcessorTests.java index 7f68b33dd86..b7afb13deaf 100644 --- a/server/src/test/java/org/elasticsearch/ingest/TrackingResultProcessorTests.java +++ b/server/src/test/java/org/elasticsearch/ingest/TrackingResultProcessorTests.java @@ -98,7 +98,7 @@ public class TrackingResultProcessorTests extends ESTestCase { public void testActualCompoundProcessorWithOnFailure() throws Exception { RuntimeException exception = new RuntimeException("fail"); - TestProcessor failProcessor = new TestProcessor("fail", "test", ingestDocument -> { throw exception; }); + TestProcessor failProcessor = new TestProcessor("fail", "test", exception); TestProcessor onFailureProcessor = new TestProcessor("success", "test", ingestDocument -> {}); CompoundProcessor actualProcessor = new CompoundProcessor(false, Arrays.asList(new CompoundProcessor(false, 
diff --git a/server/src/test/java/org/elasticsearch/recovery/RelocationIT.java b/server/src/test/java/org/elasticsearch/recovery/RelocationIT.java index 369daef08d1..a448736ed55 100644 --- a/server/src/test/java/org/elasticsearch/recovery/RelocationIT.java +++ b/server/src/test/java/org/elasticsearch/recovery/RelocationIT.java @@ -78,7 +78,6 @@ import java.util.Arrays; import java.util.Collection; import java.util.List; import java.util.concurrent.CountDownLatch; -import java.util.concurrent.ExecutionException; import java.util.concurrent.Semaphore; import java.util.concurrent.TimeUnit; import java.util.stream.Stream; @@ -446,7 +445,7 @@ public class RelocationIT extends ESIntegTestCase { } } - public void testIndexAndRelocateConcurrently() throws ExecutionException, InterruptedException { + public void testIndexAndRelocateConcurrently() throws Exception { int halfNodes = randomIntBetween(1, 3); Settings[] nodeSettings = Stream.concat( Stream.generate(() -> Settings.builder().put("node.attr.color", "blue").build()).limit(halfNodes), @@ -494,7 +493,7 @@ public class RelocationIT extends ESIntegTestCase { numDocs *= 2; logger.info(" --> waiting for relocation to complete"); - ensureGreen("test"); // move all shards to the new nodes (it waits on relocation) + ensureGreen(TimeValue.timeValueSeconds(60), "test"); // move all shards to the new nodes (it waits on relocation) final int numIters = randomIntBetween(10, 20); for (int i = 0; i < numIters; i++) { diff --git a/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceTests.java b/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceTests.java index d79763a9f6e..7a1bcefea9d 100644 --- a/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceTests.java @@ -161,7 +161,7 @@ public class RepositoriesServiceTests extends ESTestCase { @Override public SnapshotInfo finalizeSnapshot(SnapshotId snapshotId, List indices, long startTime, String failure, int totalShards, List shardFailures, long repositoryStateId, - boolean includeGlobalState, Map userMetadata) { + boolean includeGlobalState, MetaData metaData, Map userMetadata) { return null; } @@ -202,7 +202,7 @@ public class RepositoriesServiceTests extends ESTestCase { @Override public void snapshotShard(Store store, MapperService mapperService, SnapshotId snapshotId, IndexId indexId, IndexCommit - snapshotIndexCommit, IndexShardSnapshotStatus snapshotStatus) { + snapshotIndexCommit, IndexShardSnapshotStatus snapshotStatus, ActionListener listener) { } diff --git a/server/src/test/java/org/elasticsearch/repositories/fs/FsBlobStoreRepositoryIT.java b/server/src/test/java/org/elasticsearch/repositories/fs/FsBlobStoreRepositoryIT.java index b89635af97d..b79d250ecee 100644 --- a/server/src/test/java/org/elasticsearch/repositories/fs/FsBlobStoreRepositoryIT.java +++ b/server/src/test/java/org/elasticsearch/repositories/fs/FsBlobStoreRepositoryIT.java @@ -21,6 +21,7 @@ package org.elasticsearch.repositories.fs; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.repositories.blobstore.ESBlobStoreRepositoryIntegTestCase; @@ -44,10 +45,14 @@ public class FsBlobStoreRepositoryIT extends ESBlobStoreRepositoryIntegTestCase @Override protected 
Settings repositorySettings() { - return Settings.builder() - .put(super.repositorySettings()) - .put("location", randomRepoPath()) - .build(); + final Settings.Builder settings = Settings.builder(); + settings.put(super.repositorySettings()); + settings.put("location", randomRepoPath()); + if (randomBoolean()) { + long size = 1 << randomInt(10); + settings.put("chunk_size", new ByteSizeValue(size, ByteSizeUnit.KB)); + } + return settings.build(); } public void testMissingDirectoriesNotCreatedInReadonlyRepository() throws IOException, ExecutionException, InterruptedException { diff --git a/server/src/test/java/org/elasticsearch/repositories/fs/FsRepositoryTests.java b/server/src/test/java/org/elasticsearch/repositories/fs/FsRepositoryTests.java index 1dc7a6263d3..6c48a19cbb5 100644 --- a/server/src/test/java/org/elasticsearch/repositories/fs/FsRepositoryTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/fs/FsRepositoryTests.java @@ -35,6 +35,7 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.IOSupplier; import org.apache.lucene.util.TestUtil; import org.elasticsearch.Version; +import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -99,10 +100,12 @@ public class FsRepositoryTests extends ESTestCase { IndexId indexId = new IndexId(idxSettings.getIndex().getName(), idxSettings.getUUID()); IndexCommit indexCommit = Lucene.getIndexCommit(Lucene.readSegmentInfos(store.directory()), store.directory()); + final PlainActionFuture future1 = PlainActionFuture.newFuture(); runGeneric(threadPool, () -> { IndexShardSnapshotStatus snapshotStatus = IndexShardSnapshotStatus.newInitializing(); repository.snapshotShard(store, null, snapshotId, indexId, indexCommit, - snapshotStatus); + snapshotStatus, future1); + future1.actionGet(); IndexShardSnapshotStatus.Copy copy = snapshotStatus.asCopy(); assertEquals(copy.getTotalFileCount(), copy.getIncrementalFileCount()); }); @@ -124,9 +127,11 @@ public class FsRepositoryTests extends ESTestCase { SnapshotId incSnapshotId = new SnapshotId("test1", "test1"); IndexCommit incIndexCommit = Lucene.getIndexCommit(Lucene.readSegmentInfos(store.directory()), store.directory()); Collection commitFileNames = incIndexCommit.getFileNames(); + final PlainActionFuture future2 = PlainActionFuture.newFuture(); runGeneric(threadPool, () -> { IndexShardSnapshotStatus snapshotStatus = IndexShardSnapshotStatus.newInitializing(); - repository.snapshotShard(store, null, incSnapshotId, indexId, incIndexCommit, snapshotStatus); + repository.snapshotShard(store, null, incSnapshotId, indexId, incIndexCommit, snapshotStatus, future2); + future2.actionGet(); IndexShardSnapshotStatus.Copy copy = snapshotStatus.asCopy(); assertEquals(2, copy.getIncrementalFileCount()); assertEquals(commitFileNames.size(), copy.getTotalFileCount()); @@ -198,4 +203,5 @@ public class FsRepositoryTests extends ESTestCase { return docs; } } + } diff --git a/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java b/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java index 4515897df30..d097afad005 100644 --- a/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java +++ b/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java @@ -56,6 +56,7 @@ import java.util.Map; import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; import 
java.util.concurrent.atomic.AtomicReference; +import java.util.stream.Collectors; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -175,6 +176,24 @@ public class RestControllerTests extends ESTestCase { verify(controller).registerAsDeprecatedHandler(deprecatedMethod, deprecatedPath, handler, deprecationMessage, logger); } + public void testRegisterSecondMethodWithDifferentNamedWildcard() { + final RestController restController = new RestController(null, null, null, circuitBreakerService, usageService); + + RestRequest.Method firstMethod = randomFrom(RestRequest.Method.values()); + RestRequest.Method secondMethod = + randomFrom(Arrays.stream(RestRequest.Method.values()).filter(m -> m != firstMethod).collect(Collectors.toList())); + + final String path = "/_" + randomAlphaOfLengthBetween(1, 6); + + RestHandler handler = mock(RestHandler.class); + restController.registerHandler(firstMethod, path + "/{wildcard1}", handler); + + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, + () -> restController.registerHandler(secondMethod, path + "/{wildcard2}", handler)); + + assertThat(exception.getMessage(), equalTo("Trying to use conflicting wildcard names for same path: wildcard1 and wildcard2")); + } + public void testRestHandlerWrapper() throws Exception { AtomicBoolean handlerCalled = new AtomicBoolean(false); AtomicBoolean wrapperCalled = new AtomicBoolean(false); diff --git a/server/src/test/java/org/elasticsearch/search/SearchModuleTests.java b/server/src/test/java/org/elasticsearch/search/SearchModuleTests.java index 3a1dda8f2fe..eb3cdfd3439 100644 --- a/server/src/test/java/org/elasticsearch/search/SearchModuleTests.java +++ b/server/src/test/java/org/elasticsearch/search/SearchModuleTests.java @@ -61,7 +61,6 @@ import org.elasticsearch.search.fetch.subphase.highlight.FastVectorHighlighter; import org.elasticsearch.search.fetch.subphase.highlight.Highlighter; import org.elasticsearch.search.fetch.subphase.highlight.PlainHighlighter; import org.elasticsearch.search.fetch.subphase.highlight.UnifiedHighlighter; -import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.rescore.QueryRescorerBuilder; import org.elasticsearch.search.rescore.RescoreContext; import org.elasticsearch.search.rescore.RescorerBuilder; @@ -400,8 +399,10 @@ public class SearchModuleTests extends ESTestCase { } @Override - protected ValuesSourceAggregatorFactory innerBuild(SearchContext context, - ValuesSourceConfig config, AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException { + protected ValuesSourceAggregatorFactory innerBuild(QueryShardContext queryShardContext, + ValuesSourceConfig config, + AggregatorFactory parent, + Builder subFactoriesBuilder) throws IOException { return null; } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/AggregationCollectorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/AggregationCollectorTests.java index 6a77a89fc58..254b9a2b2f7 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/AggregationCollectorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/AggregationCollectorTests.java @@ -59,8 +59,9 @@ public class AggregationCollectorTests extends ESSingleNodeTestCase { try (XContentParser aggParser = createParser(JsonXContent.jsonXContent, agg)) { aggParser.nextToken(); SearchContext context = createSearchContext(index); - final AggregatorFactories factories = 
AggregatorFactories.parseAggregators(aggParser).build(context, null); - final Aggregator[] aggregators = factories.createTopLevelAggregators(); + final AggregatorFactories factories = + AggregatorFactories.parseAggregators(aggParser).build(context.getQueryShardContext(), null); + final Aggregator[] aggregators = factories.createTopLevelAggregators(context); assertEquals(1, aggregators.length); return aggregators[0].scoreMode().needsScores(); } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/MultiBucketAggregatorWrapperTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/MultiBucketAggregatorWrapperTests.java index 9380c3f6f61..b7edd464366 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/MultiBucketAggregatorWrapperTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/MultiBucketAggregatorWrapperTests.java @@ -26,6 +26,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.MockBigArrays; import org.elasticsearch.common.util.MockPageCacheRecycler; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.test.ESTestCase; @@ -45,11 +46,13 @@ public class MultiBucketAggregatorWrapperTests extends ESTestCase { LeafReaderContext leafReaderContext = MemoryIndex.fromDocument(Collections.emptyList(), new MockAnalyzer(random())) .createSearcher().getIndexReader().leaves().get(0); BigArrays bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()); + QueryShardContext queryShardContext = mock(QueryShardContext.class); + when(queryShardContext.bigArrays()).thenReturn(bigArrays); SearchContext searchContext = mock(SearchContext.class); when(searchContext.bigArrays()).thenReturn(bigArrays); Aggregator aggregator = mock(Aggregator.class); - AggregatorFactory aggregatorFactory = new TestAggregatorFactory(searchContext, aggregator); + AggregatorFactory aggregatorFactory = new TestAggregatorFactory(queryShardContext, aggregator); LeafBucketCollector wrappedCollector = mock(LeafBucketCollector.class); when(aggregator.getLeafCollector(leafReaderContext)).thenReturn(wrappedCollector); Aggregator wrapper = AggregatorFactory.asMultiBucketAggregator(aggregatorFactory, searchContext, null); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/TestAggregatorFactory.java b/server/src/test/java/org/elasticsearch/search/aggregations/TestAggregatorFactory.java index 4482e19ad25..e035c6b7550 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/TestAggregatorFactory.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/TestAggregatorFactory.java @@ -23,6 +23,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.MockBigArrays; import org.elasticsearch.common.util.MockPageCacheRecycler; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.search.internal.SearchContext; @@ -41,24 +42,24 @@ public class TestAggregatorFactory extends AggregatorFactory { private final Aggregator aggregator; - TestAggregatorFactory(SearchContext context, Aggregator aggregator) throws IOException { - super("_name", context, null, new 
AggregatorFactories.Builder(), Collections.emptyMap()); + TestAggregatorFactory(QueryShardContext queryShardContext, Aggregator aggregator) throws IOException { + super("_name", queryShardContext, null, new AggregatorFactories.Builder(), Collections.emptyMap()); this.aggregator = aggregator; } @Override - protected Aggregator createInternal(Aggregator parent, boolean collectsFromSingleBucket, List list, + protected Aggregator createInternal(SearchContext searchContext, Aggregator parent, boolean collectsFromSingleBucket, List list, Map metaData) throws IOException { return aggregator; } public static TestAggregatorFactory createInstance() throws IOException { BigArrays bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()); - SearchContext searchContext = mock(SearchContext.class); - when(searchContext.bigArrays()).thenReturn(bigArrays); + QueryShardContext queryShardContext = mock(QueryShardContext.class); + when(queryShardContext.bigArrays()).thenReturn(bigArrays); Aggregator aggregator = mock(Aggregator.class); - return new TestAggregatorFactory(searchContext, aggregator); + return new TestAggregatorFactory(queryShardContext, aggregator); } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/BestBucketsDeferringCollectorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/BestBucketsDeferringCollectorTests.java index 2f99ebbf323..74ee21b2466 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/BestBucketsDeferringCollectorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/BestBucketsDeferringCollectorTests.java @@ -67,7 +67,7 @@ public class BestBucketsDeferringCollectorTests extends AggregatorTestCase { Query rewrittenQuery = indexSearcher.rewrite(termQuery); TopDocs topDocs = indexSearcher.search(termQuery, numDocs); - SearchContext searchContext = createSearchContext(indexSearcher, createIndexSettings()); + SearchContext searchContext = createSearchContext(indexSearcher, createIndexSettings(), rewrittenQuery, null); when(searchContext.query()).thenReturn(rewrittenQuery); BestBucketsDeferringCollector collector = new BestBucketsDeferringCollector(searchContext, false) { @Override diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregationBuilderTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregationBuilderTests.java index ca61855d192..be053e905dc 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregationBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregationBuilderTests.java @@ -56,6 +56,7 @@ public class AdjacencyMatrixAggregationBuilderTests extends ESTestCase { IndexMetaData indexMetaData = IndexMetaData.builder("index").settings(settings).build(); IndexSettings indexSettings = new IndexSettings(indexMetaData, Settings.EMPTY); when(indexShard.indexSettings()).thenReturn(indexSettings); + when(queryShardContext.getIndexSettings()).thenReturn(indexSettings); SearchContext context = new TestSearchContext(queryShardContext, indexShard); Map filters = new HashMap<>(3); @@ -66,8 +67,8 @@ public class AdjacencyMatrixAggregationBuilderTests extends ESTestCase { filters.put("filter" + i, queryBuilder); } AdjacencyMatrixAggregationBuilder builder = new 
AdjacencyMatrixAggregationBuilder("dummy", filters); - IllegalArgumentException ex - = expectThrows(IllegalArgumentException.class, () -> builder.doBuild(context, null, new AggregatorFactories.Builder())); + IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, + () -> builder.doBuild(context.getQueryShardContext(), null, new AggregatorFactories.Builder())); assertThat(ex.getMessage(), equalTo("Number of filters is too large, must be less than or equal to: [2] but was [3]." + "This limit can be set by changing the [" + IndexSettings.MAX_ADJACENCY_MATRIX_FILTERS_SETTING.getKey() + "] index level setting.")); @@ -76,7 +77,7 @@ public class AdjacencyMatrixAggregationBuilderTests extends ESTestCase { Map emptyFilters = Collections.emptyMap(); AdjacencyMatrixAggregationBuilder aggregationBuilder = new AdjacencyMatrixAggregationBuilder("dummy", emptyFilters); - AggregatorFactory factory = aggregationBuilder.doBuild(context, null, new AggregatorFactories.Builder()); + AggregatorFactory factory = aggregationBuilder.doBuild(context.getQueryShardContext(), null, new AggregatorFactories.Builder()); assertThat(factory instanceof AdjacencyMatrixAggregatorFactory, is(true)); assertThat(factory.name(), equalTo("dummy")); assertWarnings("[index.max_adjacency_matrix_filters] setting was deprecated in Elasticsearch and will be " diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramTests.java index 38ed1776ec2..373df3e5f26 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramTests.java @@ -25,6 +25,7 @@ import org.apache.lucene.document.SortedNumericDocValuesField; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.store.Directory; import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.common.time.DateFormatters; @@ -152,8 +153,8 @@ public class DateHistogramTests extends BaseAggregationTestCase now, null); - when(context.getQueryShardContext()).thenReturn(qsc); + new IndexSettings(IndexMetaData.builder("foo").settings(indexSettings).build(), indexSettings), + BigArrays.NON_RECYCLING_INSTANCE, null, null, null, null, null, xContentRegistry(), writableRegistry(), + null, null, () -> now, null); DateFormatter formatter = DateFormatter.forPattern("dateOptionalTime"); DocValueFormat format = new DocValueFormat.DateTime(formatter, ZoneOffset.UTC, DateFieldMapper.Resolution.MILLISECONDS); ExtendedBounds expected = randomParsedExtendedBounds(); - ExtendedBounds parsed = unparsed(expected).parseAndValidate("test", context, format); + ExtendedBounds parsed = unparsed(expected).parseAndValidate("test", qsc, format); // parsed won't *equal* expected because equal includes the String parts assertEquals(expected.getMin(), parsed.getMin()); assertEquals(expected.getMax(), parsed.getMax()); - parsed = new ExtendedBounds("now", null).parseAndValidate("test", context, format); + parsed = new ExtendedBounds("now", null).parseAndValidate("test", qsc, format); assertEquals(now, (long) parsed.getMin()); assertNull(parsed.getMax()); - parsed = new ExtendedBounds(null, "now").parseAndValidate("test", context, format); + parsed = new 
ExtendedBounds(null, "now").parseAndValidate("test", qsc, format); assertNull(parsed.getMin()); assertEquals(now, (long) parsed.getMax()); - SearchParseException e = expectThrows(SearchParseException.class, - () -> new ExtendedBounds(100L, 90L).parseAndValidate("test", context, format)); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> new ExtendedBounds(100L, 90L).parseAndValidate("test", qsc, format)); assertEquals("[extended_bounds.min][100] cannot be greater than [extended_bounds.max][90] for histogram aggregation [test]", e.getMessage()); - e = expectThrows(SearchParseException.class, - () -> unparsed(new ExtendedBounds(100L, 90L)).parseAndValidate("test", context, format)); + e = expectThrows(IllegalArgumentException.class, + () -> unparsed(new ExtendedBounds(100L, 90L)).parseAndValidate("test", qsc, format)); assertEquals("[extended_bounds.min][100] cannot be greater than [extended_bounds.max][90] for histogram aggregation [test]", e.getMessage()); } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregatorTests.java index ea65d218fc6..7203b5dd443 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregatorTests.java @@ -23,9 +23,11 @@ import org.apache.lucene.document.SortedNumericDocValuesField; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.RandomIndexWriter; +import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.store.Directory; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.query.QueryShardContext; @@ -416,12 +418,14 @@ public class ScriptedMetricAggregatorTests extends AggregatorTestCase { * is final and cannot be mocked */ @Override - protected QueryShardContext queryShardContextMock(MapperService mapperService, IndexSettings indexSettings, - CircuitBreakerService circuitBreakerService) { + protected QueryShardContext queryShardContextMock(IndexSearcher searcher, + MapperService mapperService, + IndexSettings indexSettings, + CircuitBreakerService circuitBreakerService) { MockScriptEngine scriptEngine = new MockScriptEngine(MockScriptEngine.NAME, SCRIPTS, Collections.emptyMap()); Map engines = Collections.singletonMap(scriptEngine.getType(), scriptEngine); ScriptService scriptService = new ScriptService(Settings.EMPTY, engines, ScriptModule.CORE_CONTEXTS); - return new QueryShardContext(0, indexSettings, null, null, null, mapperService, null, scriptService, + return new QueryShardContext(0, indexSettings, BigArrays.NON_RECYCLING_INSTANCE, null, null, mapperService, null, scriptService, xContentRegistry(), writableRegistry(), null, null, System::currentTimeMillis, null); } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/PipelineAggregationHelperTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/PipelineAggregationHelperTests.java index 01ffdee4173..ddfd2c8c82c 100644 --- 
a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/PipelineAggregationHelperTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/PipelineAggregationHelperTests.java @@ -21,6 +21,7 @@ package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.common.Rounding; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; import org.elasticsearch.search.aggregations.InternalOrder; @@ -35,7 +36,6 @@ import org.elasticsearch.search.aggregations.metrics.MinAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.SumAggregationBuilder; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder; import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; -import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.test.ESTestCase; import java.io.IOException; @@ -160,13 +160,13 @@ public class PipelineAggregationHelperTests extends ESTestCase { switch (randomIntBetween(0, 2)) { case 0: factory = new HistogramAggregatorFactory("name", mock(ValuesSourceConfig.class), 0.0d, 0.0d, - mock(InternalOrder.class), false, 0L, 0.0d, 1.0d, mock(SearchContext.class), null, + mock(InternalOrder.class), false, 0L, 0.0d, 1.0d, mock(QueryShardContext.class), null, new AggregatorFactories.Builder(), Collections.emptyMap()); break; case 1: factory = new DateHistogramAggregatorFactory("name", mock(ValuesSourceConfig.class), 0L, mock(InternalOrder.class), false, 0L, mock(Rounding.class), mock(Rounding.class), - mock(ExtendedBounds.class), mock(SearchContext.class), mock(AggregatorFactory.class), + mock(ExtendedBounds.class), mock(QueryShardContext.class), mock(AggregatorFactory.class), new AggregatorFactories.Builder(), Collections.emptyMap()); break; case 2: @@ -174,7 +174,7 @@ public class PipelineAggregationHelperTests extends ESTestCase { AutoDateHistogramAggregationBuilder.RoundingInfo[] roundings = new AutoDateHistogramAggregationBuilder.RoundingInfo[1]; factory = new AutoDateHistogramAggregatorFactory("name", mock(ValuesSourceConfig.class), 1, roundings, - mock(SearchContext.class), null, new AggregatorFactories.Builder(), Collections.emptyMap()); + mock(QueryShardContext.class), null, new AggregatorFactories.Builder(), Collections.emptyMap()); } return factory; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/support/ValuesSourceConfigTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/support/ValuesSourceConfigTests.java index 32af6178dfb..2831aad4da2 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/support/ValuesSourceConfigTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/support/ValuesSourceConfigTests.java @@ -42,7 +42,7 @@ public class ValuesSourceConfigTests extends ESSingleNodeTestCase { .get(); try (Engine.Searcher searcher = indexService.getShard(0).acquireSearcher("test")) { - QueryShardContext context = indexService.newQueryShardContext(0, searcher.getIndexReader(), () -> 42L, null); + QueryShardContext context = indexService.newQueryShardContext(0, searcher, () -> 42L, null); ValuesSourceConfig config = ValuesSourceConfig.resolve( context, null, "bytes", null, null, null, null); @@ -64,7 +64,7 @@ public class ValuesSourceConfigTests extends ESSingleNodeTestCase { .get(); try (Engine.Searcher searcher = 
indexService.getShard(0).acquireSearcher("test")) { - QueryShardContext context = indexService.newQueryShardContext(0, searcher.getIndexReader(), () -> 42L, null); + QueryShardContext context = indexService.newQueryShardContext(0, searcher, () -> 42L, null); ValuesSourceConfig config = ValuesSourceConfig.resolve( context, null, "bytes", null, null, null, null); @@ -91,7 +91,7 @@ public class ValuesSourceConfigTests extends ESSingleNodeTestCase { .get(); try (Engine.Searcher searcher = indexService.getShard(0).acquireSearcher("test")) { - QueryShardContext context = indexService.newQueryShardContext(0, searcher.getIndexReader(), () -> 42L, null); + QueryShardContext context = indexService.newQueryShardContext(0, searcher, () -> 42L, null); ValuesSourceConfig config = ValuesSourceConfig.resolve( context, ValueType.STRING, "bytes", null, null, null, null); ValuesSource.Bytes valuesSource = config.toValuesSource(context); @@ -117,7 +117,7 @@ public class ValuesSourceConfigTests extends ESSingleNodeTestCase { .get(); try (Engine.Searcher searcher = indexService.getShard(0).acquireSearcher("test")) { - QueryShardContext context = indexService.newQueryShardContext(0, searcher.getIndexReader(), () -> 42L, null); + QueryShardContext context = indexService.newQueryShardContext(0, searcher, () -> 42L, null); ValuesSourceConfig config = ValuesSourceConfig.resolve( context, null, "long", null, null, null, null); @@ -139,7 +139,7 @@ public class ValuesSourceConfigTests extends ESSingleNodeTestCase { .get(); try (Engine.Searcher searcher = indexService.getShard(0).acquireSearcher("test")) { - QueryShardContext context = indexService.newQueryShardContext(0, searcher.getIndexReader(), () -> 42L, null); + QueryShardContext context = indexService.newQueryShardContext(0, searcher, () -> 42L, null); ValuesSourceConfig config = ValuesSourceConfig.resolve( context, null, "long", null, null, null, null); @@ -166,7 +166,7 @@ public class ValuesSourceConfigTests extends ESSingleNodeTestCase { .get(); try (Engine.Searcher searcher = indexService.getShard(0).acquireSearcher("test")) { - QueryShardContext context = indexService.newQueryShardContext(0, searcher.getIndexReader(), () -> 42L, null); + QueryShardContext context = indexService.newQueryShardContext(0, searcher, () -> 42L, null); ValuesSourceConfig config = ValuesSourceConfig.resolve( context, ValueType.NUMBER, "long", null, null, null, null); @@ -193,7 +193,7 @@ public class ValuesSourceConfigTests extends ESSingleNodeTestCase { .get(); try (Engine.Searcher searcher = indexService.getShard(0).acquireSearcher("test")) { - QueryShardContext context = indexService.newQueryShardContext(0, searcher.getIndexReader(), () -> 42L, null); + QueryShardContext context = indexService.newQueryShardContext(0, searcher, () -> 42L, null); ValuesSourceConfig config = ValuesSourceConfig.resolve( context, null, "bool", null, null, null, null); @@ -215,7 +215,7 @@ public class ValuesSourceConfigTests extends ESSingleNodeTestCase { .get(); try (Engine.Searcher searcher = indexService.getShard(0).acquireSearcher("test")) { - QueryShardContext context = indexService.newQueryShardContext(0, searcher.getIndexReader(), () -> 42L, null); + QueryShardContext context = indexService.newQueryShardContext(0, searcher, () -> 42L, null); ValuesSourceConfig config = ValuesSourceConfig.resolve( context, null, "bool", null, null, null, null); @@ -242,7 +242,7 @@ public class ValuesSourceConfigTests extends ESSingleNodeTestCase { .get(); try (Engine.Searcher searcher = 
indexService.getShard(0).acquireSearcher("test")) { - QueryShardContext context = indexService.newQueryShardContext(0, searcher.getIndexReader(), () -> 42L, null); + QueryShardContext context = indexService.newQueryShardContext(0, searcher, () -> 42L, null); ValuesSourceConfig config = ValuesSourceConfig.resolve( context, ValueType.BOOLEAN, "bool", null, null, null, null); @@ -263,7 +263,7 @@ public class ValuesSourceConfigTests extends ESSingleNodeTestCase { public void testTypeFieldDeprecation() { IndexService indexService = createIndex("index", Settings.EMPTY, "type"); try (Engine.Searcher searcher = indexService.getShard(0).acquireSearcher("test")) { - QueryShardContext context = indexService.newQueryShardContext(0, searcher.getIndexReader(), () -> 42L, null); + QueryShardContext context = indexService.newQueryShardContext(0, searcher, () -> 42L, null); ValuesSourceConfig config = ValuesSourceConfig.resolve( context, null, TypeFieldMapper.NAME, null, null, null, null); @@ -280,7 +280,7 @@ public class ValuesSourceConfigTests extends ESSingleNodeTestCase { .get(); try (Engine.Searcher searcher = indexService.getShard(0).acquireSearcher("test")) { - QueryShardContext context = indexService.newQueryShardContext(0, searcher.getIndexReader(), () -> 42L, null); + QueryShardContext context = indexService.newQueryShardContext(0, searcher, () -> 42L, null); ValuesSourceConfig config = ValuesSourceConfig.resolve( context, ValueType.STRING, "alias", null, null, null, null); ValuesSource.Bytes valuesSource = config.toValuesSource(context); diff --git a/server/src/test/java/org/elasticsearch/search/collapse/CollapseBuilderTests.java b/server/src/test/java/org/elasticsearch/search/collapse/CollapseBuilderTests.java index 82c3e261e84..7f192dfea61 100644 --- a/server/src/test/java/org/elasticsearch/search/collapse/CollapseBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/collapse/CollapseBuilderTests.java @@ -37,9 +37,7 @@ import org.elasticsearch.index.mapper.NumberFieldMapper; import org.elasticsearch.index.query.InnerHitBuilder; import org.elasticsearch.index.query.InnerHitBuilderTests; import org.elasticsearch.index.query.QueryShardContext; -import org.elasticsearch.search.SearchContextException; import org.elasticsearch.search.SearchModule; -import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.test.AbstractSerializingTestCase; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -138,24 +136,14 @@ public class CollapseBuilderTests extends AbstractSerializingTestCase builder.build(searchContext)); + IllegalArgumentException exc = expectThrows(IllegalArgumentException.class, () -> builder.build(shardContext)); assertEquals(exc.getMessage(), "cannot collapse on field `field` without `doc_values`"); fieldType.setHasDocValues(true); builder.setInnerHits(new InnerHitBuilder()); - exc = expectThrows(SearchContextException.class, () -> builder.build(searchContext)); + exc = expectThrows(IllegalArgumentException.class, () -> builder.build(shardContext)); assertEquals(exc.getMessage(), "cannot expand `inner_hits` for collapse field `field`, " + "only indexed field can retrieve `inner_hits`"); @@ -186,11 +174,11 @@ public class CollapseBuilderTests extends AbstractSerializingTestCase builder.build(context)); + IllegalArgumentException exc = expectThrows(IllegalArgumentException.class, () -> builder.build(shardContext)); assertEquals(exc.getMessage(), "no mapping found for `unknown_field` in order to collapse on"); } @@ -217,9 +205,9 @@ public class 
CollapseBuilderTests extends AbstractSerializingTestCase builder.build(context)); + IllegalArgumentException exc = expectThrows(IllegalArgumentException.class, () -> builder.build(shardContext)); assertEquals(exc.getMessage(), "unknown type for collapse field `field`, only keywords and numbers are accepted"); } } diff --git a/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilderTests.java b/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilderTests.java index e6f022e87a6..65d2e92555c 100644 --- a/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilderTests.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -277,8 +278,9 @@ public class HighlightBuilderTests extends ESTestCase { Index index = new Index(randomAlphaOfLengthBetween(1, 10), "_na_"); IndexSettings idxSettings = IndexSettingsModule.newIndexSettings(index, indexSettings); // shard context will only need indicesQueriesRegistry for building Query objects nested in highlighter - QueryShardContext mockShardContext = new QueryShardContext(0, idxSettings, null, null, null, null, null, null, - xContentRegistry(), namedWriteableRegistry, null, null, System::currentTimeMillis, null) { + QueryShardContext mockShardContext = new QueryShardContext(0, idxSettings, BigArrays.NON_RECYCLING_INSTANCE, + null, null, null, null, null, xContentRegistry(), namedWriteableRegistry, + null, null, System::currentTimeMillis, null) { @Override public MappedFieldType fieldMapper(String name) { TextFieldMapper.Builder builder = new TextFieldMapper.Builder(name); diff --git a/server/src/test/java/org/elasticsearch/search/rescore/QueryRescorerBuilderTests.java b/server/src/test/java/org/elasticsearch/search/rescore/QueryRescorerBuilderTests.java index 69c0871d466..995cfa3b1c9 100644 --- a/server/src/test/java/org/elasticsearch/search/rescore/QueryRescorerBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/rescore/QueryRescorerBuilderTests.java @@ -26,6 +26,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.xcontent.NamedObjectNotFoundException; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.ToXContent; @@ -141,7 +142,8 @@ public class QueryRescorerBuilderTests extends ESTestCase { .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build(); IndexSettings idxSettings = IndexSettingsModule.newIndexSettings(randomAlphaOfLengthBetween(1, 10), indexSettings); // shard context will only need indicesQueriesRegistry for building Query objects nested in query rescorer - QueryShardContext mockShardContext = new QueryShardContext(0, idxSettings, null, null, null, null, null, null, + QueryShardContext 
mockShardContext = new QueryShardContext(0, idxSettings, BigArrays.NON_RECYCLING_INSTANCE, + null, null, null, null, null, xContentRegistry(), namedWriteableRegistry, null, null, () -> nowInMillis, null) { @Override public MappedFieldType fieldMapper(String name) { @@ -184,8 +186,9 @@ public class QueryRescorerBuilderTests extends ESTestCase { .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build(); IndexSettings idxSettings = IndexSettingsModule.newIndexSettings(randomAlphaOfLengthBetween(1, 10), indexSettings); // shard context will only need indicesQueriesRegistry for building Query objects nested in query rescorer - QueryShardContext mockShardContext = new QueryShardContext(0, idxSettings, null, null, null, null, null, null, - xContentRegistry(), namedWriteableRegistry, null, null, () -> nowInMillis, null) { + QueryShardContext mockShardContext = new QueryShardContext(0, idxSettings, BigArrays.NON_RECYCLING_INSTANCE, + null, null, null, null, null, + xContentRegistry(), namedWriteableRegistry, null, null, () -> nowInMillis, null) { @Override public MappedFieldType fieldMapper(String name) { TextFieldMapper.Builder builder = new TextFieldMapper.Builder(name); diff --git a/server/src/test/java/org/elasticsearch/search/sort/AbstractSortTestCase.java b/server/src/test/java/org/elasticsearch/search/sort/AbstractSortTestCase.java index c6b23dc65fe..a09cb4b0dfa 100644 --- a/server/src/test/java/org/elasticsearch/search/sort/AbstractSortTestCase.java +++ b/server/src/test/java/org/elasticsearch/search/sort/AbstractSortTestCase.java @@ -19,12 +19,12 @@ package org.elasticsearch.search.sort; -import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.SortField; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -191,8 +191,8 @@ public abstract class AbstractSortTestCase> extends EST IndexFieldData.Builder builder = fieldType.fielddataBuilder(fieldIndexName); return builder.build(idxSettings, fieldType, new IndexFieldDataCache.None(), null, null); }; - return new QueryShardContext(0, idxSettings, bitsetFilterCache, IndexSearcher::new, indexFieldDataLookup, null, null, - scriptService, xContentRegistry(), namedWriteableRegistry, null, null, () -> randomNonNegativeLong(), null) { + return new QueryShardContext(0, idxSettings, BigArrays.NON_RECYCLING_INSTANCE, bitsetFilterCache, indexFieldDataLookup, + null, null, scriptService, xContentRegistry(), namedWriteableRegistry, null, null, () -> randomNonNegativeLong(), null) { @Override public MappedFieldType fieldMapper(String name) { diff --git a/server/src/test/java/org/elasticsearch/search/suggest/AbstractSuggestionBuilderTestCase.java b/server/src/test/java/org/elasticsearch/search/suggest/AbstractSuggestionBuilderTestCase.java index 159b0612588..d0289c7fa97 100644 --- a/server/src/test/java/org/elasticsearch/search/suggest/AbstractSuggestionBuilderTestCase.java +++ b/server/src/test/java/org/elasticsearch/search/suggest/AbstractSuggestionBuilderTestCase.java @@ -25,6 +25,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import 
org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -178,8 +179,9 @@ public abstract class AbstractSuggestionBuilderTestCase new NamedAnalyzer((String) invocation.getArguments()[0], AnalyzerScope.INDEX, new SimpleAnalyzer())); when(scriptService.compile(any(Script.class), any())).then(invocation -> new TestTemplateService.MockTemplateScript.Factory( ((Script) invocation.getArguments()[0]).getIdOrCode())); - QueryShardContext mockShardContext = new QueryShardContext(0, idxSettings, null, null, null, mapperService, null, - scriptService, xContentRegistry(), namedWriteableRegistry, null, null, System::currentTimeMillis, null); + QueryShardContext mockShardContext = new QueryShardContext(0, idxSettings, BigArrays.NON_RECYCLING_INSTANCE, null, + null, mapperService, null, scriptService, xContentRegistry(), namedWriteableRegistry, null, null, + System::currentTimeMillis, null); SuggestionContext suggestionContext = suggestionBuilder.build(mockShardContext); assertEquals(toBytesRef(suggestionBuilder.text()), suggestionContext.getText()); diff --git a/server/src/test/java/org/elasticsearch/snapshots/BlobStoreFormatIT.java b/server/src/test/java/org/elasticsearch/snapshots/BlobStoreFormatIT.java index 882b3cc4b1e..c00760899c4 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/BlobStoreFormatIT.java +++ b/server/src/test/java/org/elasticsearch/snapshots/BlobStoreFormatIT.java @@ -116,8 +116,8 @@ public class BlobStoreFormatIT extends AbstractSnapshotIntegTestCase { xContentRegistry(), true); // Write blobs in different formats - checksumSMILE.write(new BlobObj("checksum smile"), blobContainer, "check-smile"); - checksumSMILECompressed.write(new BlobObj("checksum smile compressed"), blobContainer, "check-smile-comp"); + checksumSMILE.write(new BlobObj("checksum smile"), blobContainer, "check-smile", true); + checksumSMILECompressed.write(new BlobObj("checksum smile compressed"), blobContainer, "check-smile-comp", true); // Assert that all checksum blobs can be read by all formats assertEquals(checksumSMILE.read(blobContainer, "check-smile").getText(), "checksum smile"); @@ -136,8 +136,8 @@ public class BlobStoreFormatIT extends AbstractSnapshotIntegTestCase { ChecksumBlobStoreFormat checksumFormatComp = new ChecksumBlobStoreFormat<>(BLOB_CODEC, "%s", BlobObj::fromXContent, xContentRegistry(), true); BlobObj blobObj = new BlobObj(veryRedundantText.toString()); - checksumFormatComp.write(blobObj, blobContainer, "blob-comp"); - checksumFormat.write(blobObj, blobContainer, "blob-not-comp"); + checksumFormatComp.write(blobObj, blobContainer, "blob-comp", true); + checksumFormat.write(blobObj, blobContainer, "blob-not-comp", true); Map blobs = blobContainer.listBlobsByPrefix("blob-"); assertEquals(blobs.size(), 2); assertThat(blobs.get("blob-not-comp").length(), greaterThan(blobs.get("blob-comp").length())); @@ -150,7 +150,7 @@ public class BlobStoreFormatIT extends AbstractSnapshotIntegTestCase { BlobObj blobObj = new BlobObj(testString); ChecksumBlobStoreFormat checksumFormat = new ChecksumBlobStoreFormat<>(BLOB_CODEC, "%s", BlobObj::fromXContent, xContentRegistry(), randomBoolean()); - checksumFormat.write(blobObj, blobContainer, "test-path"); + checksumFormat.write(blobObj, blobContainer, "test-path", true); 
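In the BlobStoreFormatIT hunks directly above, every ChecksumBlobStoreFormat.write call gains a trailing boolean argument that the test now passes as true. A minimal before/after sketch of the call shape, assuming the flag controls whether the write should fail when the blob already exists (the parameter name itself is not visible in these hunks):

    // Before this change the blob was written without the flag:
    //     checksumFormat.write(blobObj, blobContainer, "test-path");
    // After this change an explicit boolean is required at each call site:
    checksumFormat.write(blobObj, blobContainer, "test-path", true);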
assertEquals(checksumFormat.read(blobContainer, "test-path").getText(), testString); randomCorruption(blobContainer, "test-path"); try { diff --git a/server/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/server/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java index b924d1eccce..be2972ff146 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java +++ b/server/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java @@ -933,7 +933,6 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/25281") public void testMasterShutdownDuringFailedSnapshot() throws Exception { logger.info("--> starting two master nodes and two data nodes"); internalCluster().startMasterOnlyNodes(2); diff --git a/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java b/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java index f39b2355c87..21136f6a97d 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -48,13 +48,8 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ClusterStateListener; -import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.RestoreInProgress; import org.elasticsearch.cluster.SnapshotsInProgress; -import org.elasticsearch.cluster.SnapshotsInProgress.Entry; -import org.elasticsearch.cluster.SnapshotsInProgress.ShardSnapshotStatus; -import org.elasticsearch.cluster.SnapshotsInProgress.ShardState; import org.elasticsearch.cluster.SnapshotsInProgress.State; import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -111,7 +106,6 @@ import java.util.HashMap; import java.util.List; import java.util.Locale; import java.util.Map; -import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; @@ -2110,8 +2104,12 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas .put("compress", randomBoolean()) .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES))); - // Create index on 1 nodes and make sure each node has a primary by setting no replicas - assertAcked(prepareCreate("test-idx", 1, Settings.builder().put("number_of_replicas", 0))); + // Create index on two nodes and make sure each node has a primary by setting no replicas + assertAcked(prepareCreate("test-idx", 2, Settings.builder() + .put(SETTING_NUMBER_OF_REPLICAS, 0) + .put(SETTING_NUMBER_OF_SHARDS, between(2, 10)))); + + ensureGreen("test-idx"); logger.info("--> indexing some data"); for (int i = 0; i < 100; i++) { @@ -2121,11 +2119,13 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas assertThat(client.prepareSearch("test-idx").setSize(0).get().getHits().getTotalHits().value, equalTo(100L)); logger.info("--> start relocations"); - allowNodes("test-idx", internalCluster().numDataNodes()); + allowNodes("test-idx", 1); logger.info("--> wait for 
relocations to start"); - waitForRelocationsToStart("test-idx", TimeValue.timeValueMillis(300)); + assertBusy(() -> assertThat( + client().admin().cluster().prepareHealth("test-idx").execute().actionGet().getRelocatingShards(), greaterThan(0)), + 1L, TimeUnit.MINUTES); logger.info("--> snapshot"); client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(false).setIndices("test-idx").get(); @@ -2428,28 +2428,15 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas Client client = client(); - boolean allowPartial = randomBoolean(); logger.info("--> creating repository"); - // only block on repo init if we have partial snapshot or we run into deadlock when acquiring shard locks for index deletion/closing - boolean initBlocking = allowPartial || randomBoolean(); - if (initBlocking) { - assertAcked(client.admin().cluster().preparePutRepository("test-repo") - .setType("mock").setSettings(Settings.builder() - .put("location", randomRepoPath()) - .put("compress", randomBoolean()) - .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES) - .put("block_on_init", true) - )); - } else { - assertAcked(client.admin().cluster().preparePutRepository("test-repo") - .setType("mock").setSettings(Settings.builder() - .put("location", randomRepoPath()) - .put("compress", randomBoolean()) - .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES) - .put("block_on_data", true) - )); - } + assertAcked(client.admin().cluster().preparePutRepository("test-repo") + .setType("mock").setSettings(Settings.builder() + .put("location", randomRepoPath()) + .put("compress", randomBoolean()) + .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES) + .put("block_on_data", true))); + createIndex("test-idx-1", "test-idx-2", "test-idx-3"); ensureGreen(); @@ -2465,70 +2452,40 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas assertThat(client.prepareSearch("test-idx-2").setSize(0).get().getHits().getTotalHits().value, equalTo(100L)); assertThat(client.prepareSearch("test-idx-3").setSize(0).get().getHits().getTotalHits().value, equalTo(100L)); - logger.info("--> snapshot allow partial {}", allowPartial); + logger.info("--> snapshot"); ActionFuture future = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap") - .setIndices("test-idx-*").setWaitForCompletion(true).setPartial(allowPartial).execute(); + .setIndices("test-idx-*").setWaitForCompletion(true).setPartial(false).execute(); logger.info("--> wait for block to kick in"); - if (initBlocking) { - waitForBlock(internalCluster().getMasterName(), "test-repo", TimeValue.timeValueMinutes(1)); - } else { - waitForBlockOnAnyDataNode("test-repo", TimeValue.timeValueMinutes(1)); - } - boolean closedOnPartial = false; + waitForBlockOnAnyDataNode("test-repo", TimeValue.timeValueMinutes(1)); + try { - if (allowPartial) { - // partial snapshots allow close / delete operations - if (randomBoolean()) { - logger.info("--> delete index while partial snapshot is running"); + // non-partial snapshots do not allow close / delete operations on indices where snapshot has not been completed + if (randomBoolean()) { + try { + logger.info("--> delete index while non-partial snapshot is running"); client.admin().indices().prepareDelete("test-idx-1").get(); - } else { - logger.info("--> close index while partial snapshot is running"); - closedOnPartial = true; - 
client.admin().indices().prepareClose("test-idx-1").setWaitForActiveShards(ActiveShardCount.DEFAULT).get(); + fail("Expected deleting index to fail during snapshot"); + } catch (SnapshotInProgressException e) { + assertThat(e.getMessage(), containsString("Cannot delete indices that are being snapshotted: [[test-idx-1/")); } } else { - // non-partial snapshots do not allow close / delete operations on indices where snapshot has not been completed - if (randomBoolean()) { - try { - logger.info("--> delete index while non-partial snapshot is running"); - client.admin().indices().prepareDelete("test-idx-1").get(); - fail("Expected deleting index to fail during snapshot"); - } catch (SnapshotInProgressException e) { - assertThat(e.getMessage(), containsString("Cannot delete indices that are being snapshotted: [[test-idx-1/")); - } - } else { - try { - logger.info("--> close index while non-partial snapshot is running"); - client.admin().indices().prepareClose("test-idx-1").get(); - fail("Expected closing index to fail during snapshot"); - } catch (SnapshotInProgressException e) { - assertThat(e.getMessage(), containsString("Cannot close indices that are being snapshotted: [[test-idx-1/")); - } + try { + logger.info("--> close index while non-partial snapshot is running"); + client.admin().indices().prepareClose("test-idx-1").get(); + fail("Expected closing index to fail during snapshot"); + } catch (SnapshotInProgressException e) { + assertThat(e.getMessage(), containsString("Cannot close indices that are being snapshotted: [[test-idx-1/")); } } } finally { - if (initBlocking) { - logger.info("--> unblock running master node"); - unblockNode("test-repo", internalCluster().getMasterName()); - } else { - logger.info("--> unblock all data nodes"); - unblockAllDataNodes("test-repo"); - } + logger.info("--> unblock all data nodes"); + unblockAllDataNodes("test-repo"); } logger.info("--> waiting for snapshot to finish"); CreateSnapshotResponse createSnapshotResponse = future.get(); - if (allowPartial && closedOnPartial == false) { - logger.info("Deleted/Closed index during snapshot, but allow partial"); - assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo((SnapshotState.PARTIAL))); - assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); - assertThat(createSnapshotResponse.getSnapshotInfo().failedShards(), greaterThan(0)); - assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), - lessThan(createSnapshotResponse.getSnapshotInfo().totalShards())); - } else { - logger.info("Snapshot successfully completed"); - assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo((SnapshotState.SUCCESS))); - } + logger.info("Snapshot successfully completed"); + assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo((SnapshotState.SUCCESS))); } public void testCloseIndexDuringRestore() throws Exception { @@ -2662,93 +2619,11 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas restoreFut.get(); } - public void testDeleteOrphanSnapshot() throws Exception { - Client client = client(); - - logger.info("--> creating repository"); - final String repositoryName = "test-repo"; - assertAcked(client.admin().cluster().preparePutRepository(repositoryName) - .setType("mock").setSettings(Settings.builder() - .put("location", randomRepoPath()) - .put("compress", randomBoolean()) - .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES) - )); - - logger.info("--> create the index"); - final 
String idxName = "test-idx"; - createIndex(idxName); - ensureGreen(); - - ClusterService clusterService = internalCluster().getInstance(ClusterService.class, internalCluster().getMasterName()); - - final CountDownLatch countDownLatch = new CountDownLatch(1); - - logger.info("--> snapshot"); - final String snapshotName = "test-snap"; - CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot(repositoryName, snapshotName) - .setWaitForCompletion(true).setIndices(idxName).get(); - assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); - assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), - equalTo(createSnapshotResponse.getSnapshotInfo().totalShards())); - - logger.info("--> emulate an orphan snapshot"); - RepositoriesService repositoriesService = internalCluster().getInstance(RepositoriesService.class, - internalCluster().getMasterName()); - final RepositoryData repositoryData = getRepositoryData(repositoriesService.repository(repositoryName)); - final IndexId indexId = repositoryData.resolveIndexId(idxName); - - clusterService.submitStateUpdateTask("orphan snapshot test", new ClusterStateUpdateTask() { - - @Override - public ClusterState execute(ClusterState currentState) { - // Simulate orphan snapshot - ImmutableOpenMap.Builder shards = ImmutableOpenMap.builder(); - shards.put(new ShardId(idxName, "_na_", 0), new ShardSnapshotStatus("unknown-node", ShardState.ABORTED, "aborted")); - shards.put(new ShardId(idxName, "_na_", 1), new ShardSnapshotStatus("unknown-node", ShardState.ABORTED, "aborted")); - shards.put(new ShardId(idxName, "_na_", 2), new ShardSnapshotStatus("unknown-node", ShardState.ABORTED, "aborted")); - List entries = new ArrayList<>(); - entries.add(new Entry(new Snapshot(repositoryName, - createSnapshotResponse.getSnapshotInfo().snapshotId()), - true, - false, - State.ABORTED, - Collections.singletonList(indexId), - System.currentTimeMillis(), - repositoryData.getGenId(), - shards.build(), - SnapshotInfoTests.randomUserMetadata())); - return ClusterState.builder(currentState) - .putCustom(SnapshotsInProgress.TYPE, new SnapshotsInProgress(Collections.unmodifiableList(entries))) - .build(); - } - - @Override - public void onFailure(String source, Exception e) { - fail(); - } - - @Override - public void clusterStateProcessed(String source, ClusterState oldState, final ClusterState newState) { - countDownLatch.countDown(); - } - }); - - countDownLatch.await(); - logger.info("--> try deleting the orphan snapshot"); - - assertAcked(client.admin().cluster().prepareDeleteSnapshot(repositoryName, snapshotName).get("10s")); - } - private boolean waitForIndex(final String index, TimeValue timeout) throws InterruptedException { return awaitBusy(() -> client().admin().indices().prepareExists(index).execute().actionGet().isExists(), timeout.millis(), TimeUnit.MILLISECONDS); } - private boolean waitForRelocationsToStart(final String index, TimeValue timeout) throws InterruptedException { - return awaitBusy(() -> client().admin().cluster().prepareHealth(index).execute().actionGet().getRelocatingShards() > 0, - timeout.millis(), TimeUnit.MILLISECONDS); - } - public void testSnapshotName() throws Exception { disableRepoConsistencyCheck("This test does not create any data in the repository"); @@ -3384,7 +3259,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas assertThat(shardFailure.reason(), containsString("Random IOException")); } } - } catch 
(SnapshotCreationException | RepositoryException ex) { + } catch (SnapshotException | RepositoryException ex) { // sometimes, the snapshot will fail with a top level I/O exception assertThat(ExceptionsHelper.stackTrace(ex), containsString("Random IOException")); } @@ -3747,75 +3622,6 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas assertThat(client.prepareGet(restoredIndexName2, typeName, sameSourceIndex ? docId : docId2).get().isExists(), equalTo(true)); } - public void testAbortedSnapshotDuringInitDoesNotStart() throws Exception { - final Client client = client(); - - // Blocks on initialization - assertAcked(client.admin().cluster().preparePutRepository("repository") - .setType("mock").setSettings(Settings.builder() - .put("location", randomRepoPath()) - .put("block_on_init", true) - )); - - createIndex("test-idx"); - final int nbDocs = scaledRandomIntBetween(100, 500); - for (int i = 0; i < nbDocs; i++) { - index("test-idx", "_doc", Integer.toString(i), "foo", "bar" + i); - } - flushAndRefresh("test-idx"); - assertThat(client.prepareSearch("test-idx").setSize(0).get().getHits().getTotalHits().value, equalTo((long) nbDocs)); - - // Create a snapshot - client.admin().cluster().prepareCreateSnapshot("repository", "snap").execute(); - waitForBlock(internalCluster().getMasterName(), "repository", TimeValue.timeValueMinutes(1)); - boolean blocked = true; - - // Snapshot is initializing (and is blocked at this stage) - SnapshotsStatusResponse snapshotsStatus = client.admin().cluster().prepareSnapshotStatus("repository").setSnapshots("snap").get(); - assertThat(snapshotsStatus.getSnapshots().iterator().next().getState(), equalTo(State.INIT)); - - final List states = new CopyOnWriteArrayList<>(); - final ClusterStateListener listener = event -> { - SnapshotsInProgress snapshotsInProgress = event.state().custom(SnapshotsInProgress.TYPE); - for (SnapshotsInProgress.Entry entry : snapshotsInProgress.entries()) { - if ("snap".equals(entry.snapshot().getSnapshotId().getName())) { - states.add(entry.state()); - } - } - }; - - try { - // Record the upcoming states of the snapshot on all nodes - internalCluster().getInstances(ClusterService.class).forEach(clusterService -> clusterService.addListener(listener)); - - // Delete the snapshot while it is being initialized - ActionFuture delete = client.admin().cluster().prepareDeleteSnapshot("repository", "snap").execute(); - - // The deletion must set the snapshot in the ABORTED state - assertBusy(() -> { - SnapshotsStatusResponse status = - client.admin().cluster().prepareSnapshotStatus("repository").setSnapshots("snap").get(); - assertThat(status.getSnapshots().iterator().next().getState(), equalTo(State.ABORTED)); - }); - - // Now unblock the repository - unblockNode("repository", internalCluster().getMasterName()); - blocked = false; - - assertAcked(delete.get()); - expectThrows(SnapshotMissingException.class, () -> - client.admin().cluster().prepareGetSnapshots("repository").setSnapshots("snap").get()); - - assertFalse("Expecting snapshot state to be updated", states.isEmpty()); - assertFalse("Expecting snapshot to be aborted and not started at all", states.contains(State.STARTED)); - } finally { - internalCluster().getInstances(ClusterService.class).forEach(clusterService -> clusterService.removeListener(listener)); - if (blocked) { - unblockNode("repository", internalCluster().getMasterName()); - } - } - } - public void testRestoreIncreasesPrimaryTerms() { final String indexName = randomAlphaOfLengthBetween(5, 
10).toLowerCase(Locale.ROOT); createIndex(indexName, Settings.builder() diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java index 3ffa975a203..5a802414864 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java @@ -196,12 +196,14 @@ import java.util.stream.Stream; import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; +import static org.elasticsearch.action.support.ActionTestUtils.assertNoFailureListener; import static org.elasticsearch.env.Environment.PATH_HOME_SETTING; import static org.elasticsearch.node.Node.NODE_NAME_SETTING; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.either; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.mockito.Mockito.mock; public class SnapshotResiliencyTests extends ESTestCase { @@ -493,6 +495,85 @@ public class SnapshotResiliencyTests extends ESTestCase { assertThat(snapshotIds, either(hasSize(1)).or(hasSize(0))); } + public void testSuccessfulSnapshotWithConcurrentDynamicMappingUpdates() { + setupTestCluster(randomFrom(1, 3, 5), randomIntBetween(2, 10)); + + String repoName = "repo"; + String snapshotName = "snapshot"; + final String index = "test"; + + final int shards = randomIntBetween(1, 10); + final int documents = randomIntBetween(2, 100); + TestClusterNode masterNode = + testClusterNodes.currentMaster(testClusterNodes.nodes.values().iterator().next().clusterService.state()); + + final StepListener createSnapshotResponseStepListener = new StepListener<>(); + + continueOrDie(createRepoAndIndex(masterNode, repoName, index, shards), createIndexResponse -> { + final AtomicBoolean initiatedSnapshot = new AtomicBoolean(false); + for (int i = 0; i < documents; ++i) { + // Index a few documents with different field names so we trigger a dynamic mapping update for each of them + masterNode.client.bulk( + new BulkRequest().add(new IndexRequest(index).source(Collections.singletonMap("foo" + i, "bar"))) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE), + assertNoFailureListener( + bulkResponse -> { + assertFalse("Failures in bulkresponse: " + bulkResponse.buildFailureMessage(), bulkResponse.hasFailures()); + if (initiatedSnapshot.compareAndSet(false, true)) { + masterNode.client.admin().cluster().prepareCreateSnapshot(repoName, snapshotName) + .setWaitForCompletion(true).execute(createSnapshotResponseStepListener); + } + })); + } + }); + + final String restoredIndex = "restored"; + + final StepListener restoreSnapshotResponseStepListener = new StepListener<>(); + + continueOrDie(createSnapshotResponseStepListener, createSnapshotResponse -> masterNode.client.admin().cluster().restoreSnapshot( + new RestoreSnapshotRequest(repoName, snapshotName) + .renamePattern(index).renameReplacement(restoredIndex).waitForCompletion(true), restoreSnapshotResponseStepListener)); + + final StepListener searchResponseStepListener = new StepListener<>(); + + continueOrDie(restoreSnapshotResponseStepListener, restoreSnapshotResponse -> { + assertEquals(shards, restoreSnapshotResponse.getRestoreInfo().totalShards()); + masterNode.client.search( + new SearchRequest(restoredIndex).source(new SearchSourceBuilder().size(documents).trackTotalHits(true)), 
+ searchResponseStepListener); + }); + + final AtomicBoolean documentCountVerified = new AtomicBoolean(); + + continueOrDie(searchResponseStepListener, r -> { + final long hitCount = r.getHits().getTotalHits().value; + assertThat( + "Documents were restored but the restored index mapping was older than some documents and misses some of their fields", + (int) hitCount, + lessThanOrEqualTo(((Map) masterNode.clusterService.state().metaData().index(restoredIndex).mapping() + .sourceAsMap().get("properties")).size()) + ); + documentCountVerified.set(true); + }); + + runUntil(documentCountVerified::get, TimeUnit.MINUTES.toMillis(5L)); + + assertNotNull(createSnapshotResponseStepListener.result()); + assertNotNull(restoreSnapshotResponseStepListener.result()); + SnapshotsInProgress finalSnapshotsInProgress = masterNode.clusterService.state().custom(SnapshotsInProgress.TYPE); + assertFalse(finalSnapshotsInProgress.entries().stream().anyMatch(entry -> entry.state().completed() == false)); + final Repository repository = masterNode.repositoriesService.repository(repoName); + Collection snapshotIds = repository.getRepositoryData().getSnapshotIds(); + assertThat(snapshotIds, hasSize(1)); + + final SnapshotInfo snapshotInfo = repository.getSnapshotInfo(snapshotIds.iterator().next()); + assertEquals(SnapshotState.SUCCESS, snapshotInfo.state()); + assertThat(snapshotInfo.indices(), containsInAnyOrder(index)); + assertEquals(shards, snapshotInfo.successfulShards()); + assertEquals(0, snapshotInfo.failedShards()); + } + private StepListener createRepoAndIndex(TestClusterNode masterNode, String repoName, String index, int shards) { final AdminClient adminClient = masterNode.client.admin(); diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java b/server/src/test/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java index cbc4d8b0026..7d82e0514d1 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java @@ -99,9 +99,9 @@ public class SnapshotStatusApisIT extends AbstractSnapshotIntegTestCase { logger.info("--> wait for data nodes to get blocked"); waitForBlockOnAnyDataNode("test-repo", TimeValue.timeValueMinutes(1)); - final List snapshotStatus = client.admin().cluster().snapshotsStatus( - new SnapshotsStatusRequest("test-repo", new String[]{"test-snap"})).actionGet().getSnapshots(); - assertBusy(() -> assertEquals(SnapshotsInProgress.State.STARTED, snapshotStatus.get(0).getState()), 1L, TimeUnit.MINUTES); + assertBusy(() -> assertEquals(SnapshotsInProgress.State.STARTED, client.admin().cluster().snapshotsStatus( + new SnapshotsStatusRequest("test-repo", new String[]{"test-snap"})).actionGet().getSnapshots().get(0).getState()), 1L, + TimeUnit.MINUTES); logger.info("--> unblock all data nodes"); unblockAllDataNodes("test-repo"); diff --git a/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockEventuallyConsistentRepository.java b/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockEventuallyConsistentRepository.java index 9de395d5f82..11f30e0633b 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockEventuallyConsistentRepository.java +++ b/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockEventuallyConsistentRepository.java @@ -291,9 +291,11 @@ public class MockEventuallyConsistentRepository extends BlobStoreRepository { // We do some checks in case there is a consistent state for a blob to prevent turning 
it inconsistent. final boolean hasConsistentContent = relevantActions.size() == 1 && relevantActions.get(0).operation == Operation.PUT; - if (BlobStoreRepository.INDEX_LATEST_BLOB.equals(blobName)) { + if (BlobStoreRepository.INDEX_LATEST_BLOB.equals(blobName) + || blobName.startsWith(BlobStoreRepository.METADATA_PREFIX)) { // TODO: Ensure that it is impossible to ever decrement the generation id stored in index.latest then assert that - // it never decrements here + // it never decrements here. Same goes for the metadata, ensure that we never overwrite newer with older + // metadata. } else if (blobName.startsWith(BlobStoreRepository.SNAPSHOT_PREFIX)) { if (hasConsistentContent) { if (basePath().buildAsString().equals(path().buildAsString())) { diff --git a/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockEventuallyConsistentRepositoryTests.java b/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockEventuallyConsistentRepositoryTests.java index 5500d603ac5..14d4a5ba60b 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockEventuallyConsistentRepositoryTests.java +++ b/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockEventuallyConsistentRepositoryTests.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.snapshots.mockstore; +import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.blobstore.BlobContainer; @@ -158,19 +159,19 @@ public class MockEventuallyConsistentRepositoryTests extends ESTestCase { // We create a snap- blob for snapshot "foo" in the first generation final SnapshotId snapshotId = new SnapshotId("foo", UUIDs.randomBase64UUID()); repository.finalizeSnapshot(snapshotId, Collections.emptyList(), 1L, null, 5, Collections.emptyList(), - -1L, false, Collections.emptyMap()); + -1L, false, MetaData.EMPTY_META_DATA, Collections.emptyMap()); // We try to write another snap- blob for "foo" in the next generation. It fails because the content differs. final AssertionError assertionError = expectThrows(AssertionError.class, () -> repository.finalizeSnapshot( snapshotId, Collections.emptyList(), 1L, null, 6, Collections.emptyList(), - 0, false, Collections.emptyMap())); + 0, false, MetaData.EMPTY_META_DATA, Collections.emptyMap())); assertThat(assertionError.getMessage(), equalTo("\nExpected: <6>\n but: was <5>")); // We try to write yet another snap- blob for "foo" in the next generation. // It passes cleanly because the content of the blob except for the timestamps. 
repository.finalizeSnapshot(snapshotId, Collections.emptyList(), 1L, null, 5, Collections.emptyList(), - 0, false, Collections.emptyMap()); + 0, false, MetaData.EMPTY_META_DATA, Collections.emptyMap()); } } diff --git a/settings.gradle b/settings.gradle index 5337e321f3f..244d29c6a0c 100644 --- a/settings.gradle +++ b/settings.gradle @@ -1,5 +1,3 @@ -import org.elasticsearch.gradle.Version - String dirName = rootProject.projectDir.name rootProject.name = dirName diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/coordination/AbstractCoordinatorTestCase.java b/test/framework/src/main/java/org/elasticsearch/cluster/coordination/AbstractCoordinatorTestCase.java index 0bf7e2ed216..fe7b8720981 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/coordination/AbstractCoordinatorTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/coordination/AbstractCoordinatorTestCase.java @@ -703,8 +703,10 @@ public class AbstractCoordinatorTestCase extends ESTestCase { if (rarely()) { nodeEnvironment = newNodeEnvironment(); nodeEnvironments.add(nodeEnvironment); - delegate = new MockGatewayMetaState(Settings.EMPTY, nodeEnvironment, xContentRegistry(), localNode) - .getPersistedState(Settings.EMPTY, null); + final MockGatewayMetaState gatewayMetaState + = new MockGatewayMetaState(Settings.EMPTY, nodeEnvironment, xContentRegistry(), localNode); + gatewayMetaState.start(); + delegate = gatewayMetaState.getPersistedState(); } else { nodeEnvironment = null; delegate = new InMemoryPersistedState(0L, @@ -734,8 +736,10 @@ public class AbstractCoordinatorTestCase extends ESTestCase { new Manifest(updatedTerm, manifest.getClusterStateVersion(), manifest.getGlobalGeneration(), manifest.getIndexGenerations())); } - delegate = new MockGatewayMetaState(Settings.EMPTY, nodeEnvironment, xContentRegistry(), newLocalNode) - .getPersistedState(Settings.EMPTY, null); + final MockGatewayMetaState gatewayMetaState + = new MockGatewayMetaState(Settings.EMPTY, nodeEnvironment, xContentRegistry(), newLocalNode); + gatewayMetaState.start(); + delegate = gatewayMetaState.getPersistedState(); } else { nodeEnvironment = null; BytesStreamOutput outStream = new BytesStreamOutput(); diff --git a/test/framework/src/main/java/org/elasticsearch/gateway/MockGatewayMetaState.java b/test/framework/src/main/java/org/elasticsearch/gateway/MockGatewayMetaState.java index 317a9d1b7ba..006f2948831 100644 --- a/test/framework/src/main/java/org/elasticsearch/gateway/MockGatewayMetaState.java +++ b/test/framework/src/main/java/org/elasticsearch/gateway/MockGatewayMetaState.java @@ -28,10 +28,6 @@ import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.plugins.MetaDataUpgrader; import org.elasticsearch.transport.TransportService; -import java.io.IOException; - -import static org.mockito.Mockito.mock; - /** * {@link GatewayMetaState} constructor accepts a lot of arguments. * It's not always easy / convenient to construct these dependencies. 
@@ -42,10 +38,8 @@ public class MockGatewayMetaState extends GatewayMetaState { private final DiscoveryNode localNode; public MockGatewayMetaState(Settings settings, NodeEnvironment nodeEnvironment, - NamedXContentRegistry xContentRegistry, DiscoveryNode localNode) throws IOException { - super(settings, new MetaStateService(nodeEnvironment, xContentRegistry), - mock(MetaDataIndexUpgradeService.class), mock(MetaDataUpgrader.class), - mock(TransportService.class), mock(ClusterService.class)); + NamedXContentRegistry xContentRegistry, DiscoveryNode localNode) { + super(settings, new MetaStateService(nodeEnvironment, xContentRegistry)); this.localNode = localNode; } @@ -55,8 +49,12 @@ public class MockGatewayMetaState extends GatewayMetaState { } @Override - public void applyClusterStateUpdaters() { + public void applyClusterStateUpdaters(TransportService transportService, ClusterService clusterService) { // Just set localNode here, not to mess with ClusterService and IndicesService mocking previousClusterState = ClusterStateUpdaters.setLocalNode(previousClusterState, localNode); } + + public void start() { + start(null, null, null, null); + } } diff --git a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java index bc5a368c47d..cce9780b092 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java @@ -832,12 +832,14 @@ public abstract class IndexShardTestCase extends ESTestCase { final Snapshot snapshot, final Repository repository) throws IOException { final IndexShardSnapshotStatus snapshotStatus = IndexShardSnapshotStatus.newInitializing(); + final PlainActionFuture future = PlainActionFuture.newFuture(); try (Engine.IndexCommitRef indexCommitRef = shard.acquireLastIndexCommit(true)) { Index index = shard.shardId().getIndex(); IndexId indexId = new IndexId(index.getName(), index.getUUID()); repository.snapshotShard(shard.store(), shard.mapperService(), snapshot.getSnapshotId(), indexId, - indexCommitRef.getIndexCommit(), snapshotStatus); + indexCommitRef.getIndexCommit(), snapshotStatus, future); + future.actionGet(); } final IndexShardSnapshotStatus.Copy lastSnapshotStatus = snapshotStatus.asCopy(); diff --git a/test/framework/src/main/java/org/elasticsearch/index/shard/RestoreOnlyRepository.java b/test/framework/src/main/java/org/elasticsearch/index/shard/RestoreOnlyRepository.java index 418cee00c0d..417e4e98649 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/shard/RestoreOnlyRepository.java +++ b/test/framework/src/main/java/org/elasticsearch/index/shard/RestoreOnlyRepository.java @@ -100,7 +100,7 @@ public abstract class RestoreOnlyRepository extends AbstractLifecycleComponent i @Override public SnapshotInfo finalizeSnapshot(SnapshotId snapshotId, List indices, long startTime, String failure, int totalShards, List shardFailures, long repositoryStateId, - boolean includeGlobalState, Map userMetadata) { + boolean includeGlobalState, MetaData metaData, Map userMetadata) { return null; } @@ -135,7 +135,7 @@ public abstract class RestoreOnlyRepository extends AbstractLifecycleComponent i @Override public void snapshotShard(Store store, MapperService mapperService, SnapshotId snapshotId, IndexId indexId, - IndexCommit snapshotIndexCommit, IndexShardSnapshotStatus snapshotStatus) { + IndexCommit snapshotIndexCommit, 
IndexShardSnapshotStatus snapshotStatus, ActionListener listener) { } @Override diff --git a/test/framework/src/main/java/org/elasticsearch/ingest/IngestTestPlugin.java b/test/framework/src/main/java/org/elasticsearch/ingest/IngestTestPlugin.java index dd38a0707b4..1fe7edc7452 100644 --- a/test/framework/src/main/java/org/elasticsearch/ingest/IngestTestPlugin.java +++ b/test/framework/src/main/java/org/elasticsearch/ingest/IngestTestPlugin.java @@ -37,6 +37,10 @@ public class IngestTestPlugin extends Plugin implements IngestPlugin { if (doc.hasField("fail") && doc.getFieldValue("fail", Boolean.class)) { throw new IllegalArgumentException("test processor failed"); } + if (doc.hasField("drop") && doc.getFieldValue("drop", Boolean.class)) { + return null; + } + return doc; })); } } diff --git a/test/framework/src/main/java/org/elasticsearch/ingest/TestProcessor.java b/test/framework/src/main/java/org/elasticsearch/ingest/TestProcessor.java index a1feb3e1f73..80579831475 100644 --- a/test/framework/src/main/java/org/elasticsearch/ingest/TestProcessor.java +++ b/test/framework/src/main/java/org/elasticsearch/ingest/TestProcessor.java @@ -22,6 +22,7 @@ package org.elasticsearch.ingest; import java.util.Map; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Consumer; +import java.util.function.Function; /** * Processor used for testing, keeps track of how many times it is invoked and @@ -31,15 +32,30 @@ public class TestProcessor implements Processor { private final String type; private final String tag; - private final Consumer ingestDocumentConsumer; + private final Function ingestDocumentMapper; private final AtomicInteger invokedCounter = new AtomicInteger(); public TestProcessor(Consumer ingestDocumentConsumer) { this(null, "test-processor", ingestDocumentConsumer); } + public TestProcessor(RuntimeException e) { + this(null, "test-processor", e); + } + + public TestProcessor(String tag, String type, RuntimeException e) { + this(tag, type, (Consumer) i -> { throw e; }); + } + public TestProcessor(String tag, String type, Consumer ingestDocumentConsumer) { - this.ingestDocumentConsumer = ingestDocumentConsumer; + this(tag, type, id -> { + ingestDocumentConsumer.accept(id); + return id; + }); + } + + public TestProcessor(String tag, String type, Function ingestDocumentMapper) { + this.ingestDocumentMapper = ingestDocumentMapper; this.type = type; this.tag = tag; } @@ -47,8 +63,7 @@ public class TestProcessor implements Processor { @Override public IngestDocument execute(IngestDocument ingestDocument) throws Exception { invokedCounter.incrementAndGet(); - ingestDocumentConsumer.accept(ingestDocument); - return ingestDocument; + return ingestDocumentMapper.apply(ingestDocument); } @Override diff --git a/test/framework/src/main/java/org/elasticsearch/repositories/AbstractThirdPartyRepositoryTestCase.java b/test/framework/src/main/java/org/elasticsearch/repositories/AbstractThirdPartyRepositoryTestCase.java index bcc961aaf03..4dff0401b5a 100644 --- a/test/framework/src/main/java/org/elasticsearch/repositories/AbstractThirdPartyRepositoryTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/repositories/AbstractThirdPartyRepositoryTestCase.java @@ -302,7 +302,7 @@ public abstract class AbstractThirdPartyRepositoryTestCase extends ESSingleNodeT return future.actionGet(); } - private BlobStoreRepository getRepository() { + protected BlobStoreRepository getRepository() { return (BlobStoreRepository) 
getInstanceFromNode(RepositoriesService.class).repository("test-repo"); } } diff --git a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreTestUtil.java b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreTestUtil.java index 4dd506a165d..bbfaa0d9228 100644 --- a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreTestUtil.java +++ b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreTestUtil.java @@ -58,6 +58,7 @@ import static org.elasticsearch.test.ESTestCase.randomIntBetween; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.hasKey; +import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; @@ -169,9 +170,12 @@ public final class BlobStoreTestUtil { continue; } if (snapshotInfo.shardFailures().stream().noneMatch(shardFailure -> - shardFailure.index().equals(index) != false && shardFailure.shardId() == Integer.parseInt(entry.getKey()))) { - assertThat(entry.getValue().listBlobs(), + shardFailure.index().equals(index) && shardFailure.shardId() == Integer.parseInt(entry.getKey()))) { + final Map shardPathContents = entry.getValue().listBlobs(); + assertThat(shardPathContents, hasKey(String.format(Locale.ROOT, BlobStoreRepository.SNAPSHOT_NAME_FORMAT, snapshotId.getUUID()))); + assertThat(shardPathContents.keySet().stream() + .filter(name -> name.startsWith(BlobStoreRepository.INDEX_FILE_PREFIX)).count(), lessThanOrEqualTo(2L)); } } } diff --git a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESBlobStoreRepositoryIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESBlobStoreRepositoryIntegTestCase.java index 5268e4004ef..814550be5f8 100644 --- a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESBlobStoreRepositoryIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESBlobStoreRepositoryIntegTestCase.java @@ -28,8 +28,6 @@ import org.elasticsearch.client.Client; import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobStore; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.ByteSizeUnit; -import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.repositories.IndexId; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.repositories.RepositoryData; @@ -63,23 +61,21 @@ public abstract class ESBlobStoreRepositoryIntegTestCase extends ESIntegTestCase protected abstract String repositoryType(); protected Settings repositorySettings() { - final Settings.Builder settings = Settings.builder(); - settings.put("compress", randomBoolean()); - if (randomBoolean()) { - long size = 1 << randomInt(10); - settings.put("chunk_size", new ByteSizeValue(size, ByteSizeUnit.KB)); - } - return settings.build(); + return Settings.builder().put("compress", randomBoolean()).build(); } protected final String createRepository(final String name) { + return createRepository(name, repositorySettings()); + } + + protected final String createRepository(final String name, final Settings settings) { final boolean verify = randomBoolean(); - logger.debug("--> creating repository [name: {}, verify: {}]", name, verify); + logger.debug("--> creating 
repository [name: {}, verify: {}, settings: {}]", name, verify, settings); assertAcked(client().admin().cluster().preparePutRepository(name) .setType(repositoryType()) .setVerify(verify) - .setSettings(repositorySettings())); + .setSettings(settings)); internalCluster().getDataOrMasterNodeInstances(RepositoriesService.class).forEach(repositories -> { assertThat(repositories.repository(name), notNullValue()); @@ -307,7 +303,7 @@ public abstract class ESBlobStoreRepositoryIntegTestCase extends ESIntegTestCase assertThat(response.getSnapshotInfo().successfulShards(), equalTo(response.getSnapshotInfo().totalShards())); } - private static void assertSuccessfulRestore(RestoreSnapshotRequestBuilder requestBuilder) { + protected static void assertSuccessfulRestore(RestoreSnapshotRequestBuilder requestBuilder) { RestoreSnapshotResponse response = requestBuilder.get(); assertSuccessfulRestore(response); } diff --git a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESMockAPIBasedRepositoryIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESMockAPIBasedRepositoryIntegTestCase.java new file mode 100644 index 00000000000..21880d13683 --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESMockAPIBasedRepositoryIntegTestCase.java @@ -0,0 +1,180 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.repositories.blobstore; + +import com.sun.net.httpserver.HttpExchange; +import com.sun.net.httpserver.HttpHandler; +import com.sun.net.httpserver.HttpServer; +import org.apache.http.HttpStatus; +import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.io.Streams; +import org.elasticsearch.common.network.InetAddresses; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.mocksocket.MockHttpServer; +import org.elasticsearch.test.BackgroundIndexer; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; + +import java.io.IOException; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.hamcrest.Matchers.equalTo; + +/** + * Integration tests for {@link BlobStoreRepository} implementations rely on mock APIs that emulate cloud-based services. 
+ */ +@SuppressForbidden(reason = "this test uses a HttpServer to emulate a cloud-based storage service") +public abstract class ESMockAPIBasedRepositoryIntegTestCase extends ESBlobStoreRepositoryIntegTestCase { + + private static HttpServer httpServer; + private Map handlers; + + @BeforeClass + public static void startHttpServer() throws Exception { + httpServer = MockHttpServer.createHttp(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0), 0); + httpServer.start(); + } + + @Before + public void setUpHttpServer() { + handlers = createHttpHandlers(); + handlers.forEach((c, h) -> { + HttpHandler handler = h; + if (randomBoolean()) { + handler = createErroneousHttpHandler(handler); + } + httpServer.createContext(c, handler); + }); + } + + @AfterClass + public static void stopHttpServer() { + httpServer.stop(0); + httpServer = null; + } + + @After + public void tearDownHttpServer() { + if (handlers != null) { + handlers.keySet().forEach(context -> httpServer.removeContext(context)); + } + } + + protected abstract Map createHttpHandlers(); + + protected abstract HttpHandler createErroneousHttpHandler(HttpHandler delegate); + + /** + * Test the snapshot and restore of an index which has large segments files. + */ + public final void testSnapshotWithLargeSegmentFiles() throws Exception { + final String repository = createRepository(randomName()); + final String index = "index-no-merges"; + createIndex(index, Settings.builder() + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .build()); + + final long nbDocs = randomLongBetween(10_000L, 20_000L); + try (BackgroundIndexer indexer = new BackgroundIndexer(index, "_doc", client(), (int) nbDocs)) { + waitForDocs(nbDocs, indexer); + } + + flushAndRefresh(index); + ForceMergeResponse forceMerge = client().admin().indices().prepareForceMerge(index).setFlush(true).setMaxNumSegments(1).get(); + assertThat(forceMerge.getSuccessfulShards(), equalTo(1)); + assertHitCount(client().prepareSearch(index).setSize(0).setTrackTotalHits(true).get(), nbDocs); + + assertSuccessfulSnapshot(client().admin().cluster().prepareCreateSnapshot(repository, "snapshot") + .setWaitForCompletion(true).setIndices(index)); + + assertAcked(client().admin().indices().prepareDelete(index)); + + assertSuccessfulRestore(client().admin().cluster().prepareRestoreSnapshot(repository, "snapshot").setWaitForCompletion(true)); + ensureGreen(index); + assertHitCount(client().prepareSearch(index).setSize(0).setTrackTotalHits(true).get(), nbDocs); + } + + protected static String httpServerUrl() { + InetSocketAddress address = httpServer.getAddress(); + return "http://" + InetAddresses.toUriString(address.getAddress()) + ":" + address.getPort(); + } + + /** + * HTTP handler that injects random service errors + * + * Note: it is not a good idea to allow this handler to simulate too many errors as it would + * slow down the test suite. 
+ */ + @SuppressForbidden(reason = "this test uses a HttpServer to emulate a cloud-based storage service") + protected abstract static class ErroneousHttpHandler implements HttpHandler { + + // first key is a unique identifier for the incoming HTTP request, + // value is the number of times the request has been seen + private final Map requests; + private final HttpHandler delegate; + private final int maxErrorsPerRequest; + + @SuppressForbidden(reason = "this test uses a HttpServer to emulate a cloud-based storage service") + protected ErroneousHttpHandler(final HttpHandler delegate, final int maxErrorsPerRequest) { + this.requests = new ConcurrentHashMap<>(); + this.delegate = delegate; + this.maxErrorsPerRequest = maxErrorsPerRequest; + assert maxErrorsPerRequest > 1; + } + + @Override + public void handle(final HttpExchange exchange) throws IOException { + final String requestId = requestUniqueId(exchange); + assert Strings.hasText(requestId); + + final boolean canFailRequest = canFailRequest(exchange); + final int count = requests.computeIfAbsent(requestId, req -> new AtomicInteger(0)).incrementAndGet(); + if (count >= maxErrorsPerRequest || canFailRequest == false || randomBoolean()) { + requests.remove(requestId); + delegate.handle(exchange); + } else { + handleAsError(exchange); + } + } + + private void handleAsError(final HttpExchange exchange) throws IOException { + Streams.readFully(exchange.getRequestBody()); + exchange.sendResponseHeaders(HttpStatus.SC_INTERNAL_SERVER_ERROR, -1); + exchange.close(); + } + + protected abstract String requestUniqueId(HttpExchange exchange); + + protected boolean canFailRequest(final HttpExchange exchange) { + return true; + } + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java b/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java index 6afa1b32182..186fc94ee57 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java @@ -39,6 +39,7 @@ import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.text.Text; +import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.MockBigArrays; import org.elasticsearch.common.util.MockPageCacheRecycler; import org.elasticsearch.index.Index; @@ -105,71 +106,6 @@ public abstract class AggregatorTestCase extends ESTestCase { private List releasables = new ArrayList<>(); private static final String TYPE_NAME = "type"; - protected AggregatorFactory createAggregatorFactory(AggregationBuilder aggregationBuilder, - IndexSearcher indexSearcher, - MappedFieldType... fieldTypes) throws IOException { - return createAggregatorFactory(aggregationBuilder, indexSearcher, createIndexSettings(), - new MultiBucketConsumer(DEFAULT_MAX_BUCKETS), fieldTypes); - } - - - protected AggregatorFactory createAggregatorFactory(AggregationBuilder aggregationBuilder, - IndexSearcher indexSearcher, - IndexSettings indexSettings, - MultiBucketConsumer bucketConsumer, - MappedFieldType... fieldTypes) throws IOException { - return createAggregatorFactory(null, aggregationBuilder, indexSearcher, indexSettings, bucketConsumer, fieldTypes); - } - - /** Create a factory for the given aggregation builder. 
*/ - protected AggregatorFactory createAggregatorFactory(Query query, - AggregationBuilder aggregationBuilder, - IndexSearcher indexSearcher, - IndexSettings indexSettings, - MultiBucketConsumer bucketConsumer, - MappedFieldType... fieldTypes) throws IOException { - SearchContext searchContext = createSearchContext(indexSearcher, indexSettings); - CircuitBreakerService circuitBreakerService = new NoneCircuitBreakerService(); - IndexShard indexShard = mock(IndexShard.class); - when(indexShard.shardId()).thenReturn(new ShardId("test", "test", 0)); - when(searchContext.indexShard()).thenReturn(indexShard); - when(searchContext.aggregations()) - .thenReturn(new SearchContextAggregations(AggregatorFactories.EMPTY, bucketConsumer)); - when(searchContext.query()).thenReturn(query); - when(searchContext.bigArrays()).thenReturn(new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), circuitBreakerService)); - // TODO: now just needed for top_hits, this will need to be revised for other agg unit tests: - MapperService mapperService = mapperServiceMock(); - when(mapperService.getIndexSettings()).thenReturn(indexSettings); - when(mapperService.hasNested()).thenReturn(false); - DocumentMapper mapper = mock(DocumentMapper.class); - when(mapper.typeText()).thenReturn(new Text(TYPE_NAME)); - when(mapper.type()).thenReturn(TYPE_NAME); - when(mapperService.documentMapper()).thenReturn(mapper); - when(searchContext.mapperService()).thenReturn(mapperService); - IndexFieldDataService ifds = new IndexFieldDataService(indexSettings, - new IndicesFieldDataCache(Settings.EMPTY, new IndexFieldDataCache.Listener() { - }), circuitBreakerService, mapperService); - when(searchContext.getForField(Mockito.any(MappedFieldType.class))) - .thenAnswer(invocationOnMock -> ifds.getForField((MappedFieldType) invocationOnMock.getArguments()[0])); - - SearchLookup searchLookup = new SearchLookup(mapperService, ifds::getForField, new String[]{TYPE_NAME}); - when(searchContext.lookup()).thenReturn(searchLookup); - - QueryShardContext queryShardContext = queryShardContextMock(mapperService, indexSettings, circuitBreakerService); - - when(searchContext.getQueryShardContext()).thenReturn(queryShardContext); - Map fieldNameToType = new HashMap<>(); - fieldNameToType.putAll(Arrays.stream(fieldTypes) - .filter(Objects::nonNull) - .collect(Collectors.toMap(MappedFieldType::name, Function.identity()))); - fieldNameToType.putAll(getFieldAliases(fieldTypes)); - - registerFieldTypes(queryShardContext, searchContext, mapperService, - circuitBreakerService, fieldNameToType); - - return aggregationBuilder.build(searchContext, null); - } - /** * Allows subclasses to provide alternate names for the provided field type, which * can be useful when testing aggregations on field aliases. 
@@ -178,10 +114,8 @@ public abstract class AggregatorTestCase extends ESTestCase { return Collections.emptyMap(); } - private void registerFieldTypes(QueryShardContext queryShardContext, - SearchContext searchContext, + private void registerFieldTypes(SearchContext searchContext, MapperService mapperService, - CircuitBreakerService circuitBreakerService, Map fieldNameToType) { for (Map.Entry entry : fieldNameToType.entrySet()) { String fieldName = entry.getKey(); @@ -190,8 +124,6 @@ public abstract class AggregatorTestCase extends ESTestCase { when(mapperService.fullName(fieldName)).thenReturn(fieldType); when(searchContext.smartNameFieldType(fieldName)).thenReturn(fieldType); } - - } protected A createAggregator(AggregationBuilder aggregationBuilder, @@ -231,13 +163,18 @@ public abstract class AggregatorTestCase extends ESTestCase { IndexSettings indexSettings, MultiBucketConsumer bucketConsumer, MappedFieldType... fieldTypes) throws IOException { + SearchContext searchContext = createSearchContext(indexSearcher, indexSettings, query, bucketConsumer, fieldTypes); @SuppressWarnings("unchecked") - A aggregator = (A) createAggregatorFactory(query, aggregationBuilder, indexSearcher, indexSettings, bucketConsumer, fieldTypes) - .create(null, true); + A aggregator = (A) aggregationBuilder.build(searchContext.getQueryShardContext(), null) + .create(searchContext, null, true); return aggregator; } - protected SearchContext createSearchContext(IndexSearcher indexSearcher, IndexSettings indexSettings) { + protected SearchContext createSearchContext(IndexSearcher indexSearcher, + IndexSettings indexSettings, + Query query, + MultiBucketConsumer bucketConsumer, + MappedFieldType... fieldTypes) { QueryCache queryCache = new DisabledQueryCache(indexSettings); QueryCachingPolicy queryCachingPolicy = new QueryCachingPolicy() { @Override @@ -258,7 +195,38 @@ public abstract class AggregatorTestCase extends ESTestCase { when(searchContext.searcher()).thenReturn(contextIndexSearcher); when(searchContext.fetchPhase()) .thenReturn(new FetchPhase(Arrays.asList(new FetchSourceSubPhase(), new DocValueFieldsFetchSubPhase()))); - when(searchContext.getObjectMapper(anyString())).thenAnswer(invocation -> { + when(searchContext.bitsetFilterCache()).thenReturn(new BitsetFilterCache(indexSettings, mock(Listener.class))); + CircuitBreakerService circuitBreakerService = new NoneCircuitBreakerService(); + IndexShard indexShard = mock(IndexShard.class); + when(indexShard.shardId()).thenReturn(new ShardId("test", "test", 0)); + when(searchContext.indexShard()).thenReturn(indexShard); + when(searchContext.aggregations()) + .thenReturn(new SearchContextAggregations(AggregatorFactories.EMPTY, bucketConsumer)); + when(searchContext.query()).thenReturn(query); + when(searchContext.bigArrays()).thenReturn(new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), circuitBreakerService)); + + // TODO: now just needed for top_hits, this will need to be revised for other agg unit tests: + MapperService mapperService = mapperServiceMock(); + when(mapperService.getIndexSettings()).thenReturn(indexSettings); + when(mapperService.hasNested()).thenReturn(false); + DocumentMapper mapper = mock(DocumentMapper.class); + when(mapper.typeText()).thenReturn(new Text(TYPE_NAME)); + when(mapper.type()).thenReturn(TYPE_NAME); + when(mapperService.documentMapper()).thenReturn(mapper); + when(searchContext.mapperService()).thenReturn(mapperService); + IndexFieldDataService ifds = new IndexFieldDataService(indexSettings, + new 
IndicesFieldDataCache(Settings.EMPTY, new IndexFieldDataCache.Listener() { + }), circuitBreakerService, mapperService); + when(searchContext.getForField(Mockito.any(MappedFieldType.class))) + .thenAnswer(invocationOnMock -> ifds.getForField((MappedFieldType) invocationOnMock.getArguments()[0])); + + SearchLookup searchLookup = new SearchLookup(mapperService, ifds::getForField, new String[]{TYPE_NAME}); + when(searchContext.lookup()).thenReturn(searchLookup); + + QueryShardContext queryShardContext = + queryShardContextMock(contextIndexSearcher, mapperService, indexSettings, circuitBreakerService); + when(searchContext.getQueryShardContext()).thenReturn(queryShardContext); + when(queryShardContext.getObjectMapper(anyString())).thenAnswer(invocation -> { String fieldName = (String) invocation.getArguments()[0]; if (fieldName.startsWith(NESTEDFIELD_PREFIX)) { BuilderContext context = new BuilderContext(indexSettings.getSettings(), new ContentPath()); @@ -266,7 +234,13 @@ public abstract class AggregatorTestCase extends ESTestCase { } return null; }); - when(searchContext.bitsetFilterCache()).thenReturn(new BitsetFilterCache(indexSettings, mock(Listener.class))); + Map fieldNameToType = new HashMap<>(); + fieldNameToType.putAll(Arrays.stream(fieldTypes) + .filter(Objects::nonNull) + .collect(Collectors.toMap(MappedFieldType::name, Function.identity()))); + fieldNameToType.putAll(getFieldAliases(fieldTypes)); + + registerFieldTypes(searchContext, mapperService, fieldNameToType); doAnswer(invocation -> { /* Store the release-ables so we can release them at the end of the test case. This is important because aggregations don't * close their sub-aggregations. This is fairly similar to what the production code does. */ @@ -297,13 +271,15 @@ public abstract class AggregatorTestCase extends ESTestCase { /** * sub-tests that need a more complex mock can overwrite this */ - protected QueryShardContext queryShardContextMock(MapperService mapperService, IndexSettings indexSettings, - CircuitBreakerService circuitBreakerService) { + protected QueryShardContext queryShardContextMock(IndexSearcher searcher, + MapperService mapperService, + IndexSettings indexSettings, + CircuitBreakerService circuitBreakerService) { - return new QueryShardContext(0, indexSettings, null, null, + return new QueryShardContext(0, indexSettings, BigArrays.NON_RECYCLING_INSTANCE, null, getIndexFieldDataLookup(mapperService, circuitBreakerService), mapperService, null, getMockScriptService(), xContentRegistry(), - writableRegistry(), null, null, System::currentTimeMillis, null); + writableRegistry(), null, searcher, System::currentTimeMillis, null); } /** diff --git a/test/framework/src/main/java/org/elasticsearch/snapshots/mockstore/MockRepository.java b/test/framework/src/main/java/org/elasticsearch/snapshots/mockstore/MockRepository.java index bd0a5cc772f..9a157e11c72 100644 --- a/test/framework/src/main/java/org/elasticsearch/snapshots/mockstore/MockRepository.java +++ b/test/framework/src/main/java/org/elasticsearch/snapshots/mockstore/MockRepository.java @@ -24,7 +24,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.index.CorruptIndexException; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobMetaData; @@ -39,10 +38,8 @@ import 
org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.env.Environment; import org.elasticsearch.plugins.RepositoryPlugin; -import org.elasticsearch.repositories.IndexId; import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.fs.FsRepository; -import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; @@ -102,8 +99,6 @@ public class MockRepository extends FsRepository { private final String randomPrefix; - private volatile boolean blockOnInitialization; - private volatile boolean blockOnControlFiles; private volatile boolean blockOnDataFiles; @@ -126,21 +121,12 @@ public class MockRepository extends FsRepository { maximumNumberOfFailures = metadata.settings().getAsLong("max_failure_number", 100L); blockOnControlFiles = metadata.settings().getAsBoolean("block_on_control", false); blockOnDataFiles = metadata.settings().getAsBoolean("block_on_data", false); - blockOnInitialization = metadata.settings().getAsBoolean("block_on_init", false); blockAndFailOnWriteSnapFile = metadata.settings().getAsBoolean("block_on_snap", false); randomPrefix = metadata.settings().get("random", "default"); waitAfterUnblock = metadata.settings().getAsLong("wait_after_unblock", 0L); logger.info("starting mock repository with random prefix {}", randomPrefix); } - @Override - public void initializeSnapshot(SnapshotId snapshotId, List indices, MetaData clusterMetadata) { - if (blockOnInitialization) { - blockExecution(); - } - super.initializeSnapshot(snapshotId, indices, clusterMetadata); - } - private static RepositoryMetaData overrideSettings(RepositoryMetaData metadata, Environment environment) { // TODO: use another method of testing not being able to read the test file written by the master... 
// this is super duper hacky @@ -174,7 +160,6 @@ public class MockRepository extends FsRepository { // Clean blocking flags, so we wouldn't try to block again blockOnDataFiles = false; blockOnControlFiles = false; - blockOnInitialization = false; blockOnWriteIndexFile = false; blockAndFailOnWriteSnapFile = false; this.notifyAll(); @@ -200,7 +185,7 @@ public class MockRepository extends FsRepository { logger.debug("[{}] Blocking execution", metadata.name()); boolean wasBlocked = false; try { - while (blockOnDataFiles || blockOnControlFiles || blockOnInitialization || blockOnWriteIndexFile || + while (blockOnDataFiles || blockOnControlFiles || blockOnWriteIndexFile || blockAndFailOnWriteSnapFile) { blocked = true; this.wait(); @@ -384,6 +369,9 @@ public class MockRepository extends FsRepository { public void writeBlobAtomic(final String blobName, final InputStream inputStream, final long blobSize, final boolean failIfAlreadyExists) throws IOException { final Random random = RandomizedContext.current().getRandom(); + if (blobName.startsWith("index-") && blockOnWriteIndexFile) { + blockExecutionAndFail(blobName); + } if ((delegate() instanceof FsBlobContainer) && (random.nextBoolean())) { // Simulate a failure between the write and move operation in FsBlobContainer final String tempBlobName = FsBlobContainer.tempBlobName(blobName); diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java index 863a1deecf1..98c66fc3d30 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java @@ -21,7 +21,6 @@ package org.elasticsearch.test; import com.carrotsearch.randomizedtesting.RandomizedTest; import com.carrotsearch.randomizedtesting.SeedUtils; -import org.apache.lucene.index.IndexReader; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.util.Accountable; import org.elasticsearch.Version; @@ -41,6 +40,7 @@ import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsModule; +import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.env.Environment; @@ -270,10 +270,10 @@ public abstract class AbstractBuilderTestCase extends ESTestCase { } /** - * @return a new {@link QueryShardContext} with the provided reader + * @return a new {@link QueryShardContext} with the provided searcher */ - protected static QueryShardContext createShardContext(IndexReader reader) { - return serviceHolder.createShardContext(reader); + protected static QueryShardContext createShardContext(IndexSearcher searcher) { + return serviceHolder.createShardContext(searcher); } /** @@ -422,10 +422,10 @@ public abstract class AbstractBuilderTestCase extends ESTestCase { public void close() throws IOException { } - QueryShardContext createShardContext(IndexReader reader) { - return new QueryShardContext(0, idxSettings, bitsetFilterCache, IndexSearcher::new, indexFieldDataService::getForField, - mapperService, similarityService, scriptService, xContentRegistry, namedWriteableRegistry, this.client, reader, - () -> nowInMillis, null); + QueryShardContext createShardContext(IndexSearcher searcher) { + return 
new QueryShardContext(0, idxSettings, BigArrays.NON_RECYCLING_INSTANCE, bitsetFilterCache, + indexFieldDataService::getForField, mapperService, similarityService, scriptService, xContentRegistry, + namedWriteableRegistry, this.client, searcher, () -> nowInMillis, null); } ScriptModule createScriptModule(List scriptPlugins) { diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index 035ce22094f..65ef8c53f05 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -33,6 +33,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; +import org.elasticsearch.action.admin.cluster.node.hotthreads.NodeHotThreads; import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; @@ -918,10 +919,13 @@ public abstract class ESIntegTestCase extends ESTestCase { ClusterHealthResponse actionGet = client().admin().cluster().health(healthRequest).actionGet(); if (actionGet.isTimedOut()) { - logger.info("{} timed out, cluster state:\n{}\n{}", + final String hotThreads = client().admin().cluster().prepareNodesHotThreads().setIgnoreIdleThreads(false).get().getNodes() + .stream().map(NodeHotThreads::getHotThreads).collect(Collectors.joining("\n")); + logger.info("{} timed out, cluster state:\n{}\npending tasks:\n{}\nhot threads:\n{}\n", method, client().admin().cluster().prepareState().get().getState(), - client().admin().cluster().preparePendingClusterTasks().get()); + client().admin().cluster().preparePendingClusterTasks().get(), + hotThreads); fail("timed out waiting for " + color + " state"); } assertThat("Expected at least " + clusterHealthStatus + " but got " + actionGet.getStatus(), @@ -1300,7 +1304,7 @@ public abstract class ESIntegTestCase extends ESTestCase { /** * Returns true iff the given index exists otherwise false */ - protected boolean indexExists(String index) { + protected static boolean indexExists(String index) { IndicesExistsResponse actionGet = client().admin().indices().prepareExists(index).execute().actionGet(); return actionGet.isExists(); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index b04982ae92e..869b4313baf 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -958,10 +958,16 @@ public final class InternalTestCluster extends TestCluster { } void startNode() { + boolean success = false; try { node.start(); + success = true; } catch (NodeValidationException e) { throw new RuntimeException(e); + } finally { + if (success == false) { + IOUtils.closeWhileHandlingException(node); + } } } @@ -2197,6 +2203,7 @@ public final class InternalTestCluster extends TestCluster { .put(settings) .put(Node.NODE_MASTER_SETTING.getKey(), true) .put(Node.NODE_DATA_SETTING.getKey(), false) + .put(Node.NODE_INGEST_SETTING.getKey(), false) .build(); return startNode(settings1); } diff --git 
a/test/framework/src/main/java/org/elasticsearch/test/TestSearchContext.java b/test/framework/src/main/java/org/elasticsearch/test/TestSearchContext.java index 07b5f67db38..97b7de893ba 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/TestSearchContext.java +++ b/test/framework/src/main/java/org/elasticsearch/test/TestSearchContext.java @@ -31,6 +31,7 @@ import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.ObjectMapper; +import org.elasticsearch.index.query.InnerHitContextBuilder; import org.elasticsearch.index.query.ParsedQuery; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.shard.IndexShard; @@ -199,6 +200,14 @@ public class TestSearchContext extends SearchContext { public void highlight(SearchContextHighlight highlight) { } + @Override + public void innerHits(Map innerHits) {} + + @Override + public Map innerHits() { + return null; + } + @Override public SuggestionSearchContext suggest() { return null; diff --git a/test/framework/src/test/java/org/elasticsearch/search/MockSearchServiceTests.java b/test/framework/src/test/java/org/elasticsearch/search/MockSearchServiceTests.java index 93ed0c79171..668495f6f70 100644 --- a/test/framework/src/test/java/org/elasticsearch/search/MockSearchServiceTests.java +++ b/test/framework/src/test/java/org/elasticsearch/search/MockSearchServiceTests.java @@ -26,6 +26,7 @@ import org.elasticsearch.action.search.SearchType; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.shard.ShardId; @@ -41,7 +42,7 @@ public class MockSearchServiceTests extends ESTestCase { public void testAssertNoInFlightContext() { final long nowInMillis = randomNonNegativeLong(); SearchContext s = new TestSearchContext(new QueryShardContext(0, - new IndexSettings(EMPTY_INDEX_METADATA, Settings.EMPTY), null, null, null, null, null, null, + new IndexSettings(EMPTY_INDEX_METADATA, Settings.EMPTY), BigArrays.NON_RECYCLING_INSTANCE, null, null, null, null, null, xContentRegistry(), writableRegistry(), null, null, () -> nowInMillis, null)) { @Override diff --git a/x-pack/docs/en/rest-api/security/role-mapping-resources.asciidoc b/x-pack/docs/en/rest-api/security/role-mapping-resources.asciidoc index 1b41d89db0b..ad90bef063a 100644 --- a/x-pack/docs/en/rest-api/security/role-mapping-resources.asciidoc +++ b/x-pack/docs/en/rest-api/security/role-mapping-resources.asciidoc @@ -80,7 +80,7 @@ if _at least one_ of the member values matches. For example, the following rule matches any user who is a member of the `admin` group, regardless of any other groups they belong to: -[source, js] +[source,js] ------------------------------------------------------------ { "field" : { "groups" : "admin" } } ------------------------------------------------------------ diff --git a/x-pack/docs/en/rest-api/watcher/ack-watch.asciidoc b/x-pack/docs/en/rest-api/watcher/ack-watch.asciidoc index 2a60e09f65c..1ca9b101e83 100644 --- a/x-pack/docs/en/rest-api/watcher/ack-watch.asciidoc +++ b/x-pack/docs/en/rest-api/watcher/ack-watch.asciidoc @@ -58,7 +58,7 @@ status from a watch execution. 
To demonstrate let's create a new watch: -[source,js] +[source,console] -------------------------------------------------- PUT _watcher/watch/my_watch { @@ -89,17 +89,15 @@ PUT _watcher/watch/my_watch } } -------------------------------------------------- -// CONSOLE // TESTSETUP The current status of a watch and the state of its actions is returned with the watch definition when you call the <>: -[source,js] +[source,console] -------------------------------------------------- GET _watcher/watch/my_watch -------------------------------------------------- -// CONSOLE The action state of a newly-created watch is `awaits_successful_execution`: @@ -134,7 +132,7 @@ When the watch executes and the condition matches, the value of the `ack.state` changes to `ackable`. Let's force execution of the watch and fetch it again to check the status: -[source,js] +[source,console] -------------------------------------------------- POST _watcher/watch/my_watch/_execute { @@ -143,7 +141,6 @@ POST _watcher/watch/my_watch/_execute GET _watcher/watch/my_watch -------------------------------------------------- -// CONSOLE // TEST[continued] and the action is now in `ackable` state: @@ -191,12 +188,11 @@ and the action is now in `ackable` state: Now we can acknowledge it: -[source,js] +[source,console] -------------------------------------------------- PUT _watcher/watch/my_watch/_ack/test_index GET _watcher/watch/my_watch -------------------------------------------------- -// CONSOLE // TEST[continued] [source,console-result] @@ -247,21 +243,19 @@ condition of the watch is not met (the condition evaluates to `false`). You can acknowledge multiple actions by assigning the `actions` parameter a comma-separated list of action ids: -[source,js] +[source,console] -------------------------------------------------- POST _watcher/watch/my_watch/_ack/action1,action2 -------------------------------------------------- -// CONSOLE To acknowledge all of the actions of a watch, simply omit the `actions` parameter: -[source,js] +[source,console] -------------------------------------------------- POST _watcher/watch/my_watch/_ack -------------------------------------------------- // TEST[s/^/POST _watcher\/watch\/my_watch\/_execute\n{ "record_execution" : true }\n/] -// CONSOLE The response looks like a get watch response, but only contains the status: diff --git a/x-pack/docs/en/rest-api/watcher/activate-watch.asciidoc b/x-pack/docs/en/rest-api/watcher/activate-watch.asciidoc index a2cd3fb17d6..cc1386d0251 100644 --- a/x-pack/docs/en/rest-api/watcher/activate-watch.asciidoc +++ b/x-pack/docs/en/rest-api/watcher/activate-watch.asciidoc @@ -47,11 +47,10 @@ information, see {stack-ov}/security-privileges.html[Security privileges]. 
The status of an inactive watch is returned with the watch definition when you call the <>: -[source,js] +[source,console] -------------------------------------------------- GET _watcher/watch/my_watch -------------------------------------------------- -// CONSOLE // TEST[setup:my_inactive_watch] [source,console-result] @@ -80,11 +79,10 @@ GET _watcher/watch/my_watch You can activate the watch by executing the following API call: -[source,js] +[source,console] -------------------------------------------------- PUT _watcher/watch/my_watch/_activate -------------------------------------------------- -// CONSOLE // TEST[setup:my_inactive_watch] The new state of the watch is returned as part of its overall status: diff --git a/x-pack/docs/en/rest-api/watcher/deactivate-watch.asciidoc b/x-pack/docs/en/rest-api/watcher/deactivate-watch.asciidoc index 763020d0012..f492090420a 100644 --- a/x-pack/docs/en/rest-api/watcher/deactivate-watch.asciidoc +++ b/x-pack/docs/en/rest-api/watcher/deactivate-watch.asciidoc @@ -47,11 +47,10 @@ information, see {stack-ov}/security-privileges.html[Security privileges]. The status of an active watch is returned with the watch definition when you call the <>: -[source,js] +[source,console] -------------------------------------------------- GET _watcher/watch/my_watch -------------------------------------------------- -// CONSOLE // TEST[setup:my_active_watch] [source,console-result] @@ -80,11 +79,10 @@ GET _watcher/watch/my_watch You can deactivate the watch by executing the following API call: -[source,js] +[source,console] -------------------------------------------------- PUT _watcher/watch/my_watch/_deactivate -------------------------------------------------- -// CONSOLE // TEST[setup:my_active_watch] The new state of the watch is returned as part of its overall status: diff --git a/x-pack/docs/en/rest-api/watcher/delete-watch.asciidoc b/x-pack/docs/en/rest-api/watcher/delete-watch.asciidoc index 4f5098a185b..30aba42417a 100644 --- a/x-pack/docs/en/rest-api/watcher/delete-watch.asciidoc +++ b/x-pack/docs/en/rest-api/watcher/delete-watch.asciidoc @@ -56,11 +56,10 @@ IMPORTANT: Deleting a watch must be done via this API only. Do not delete the The following example deletes a watch with the `my-watch` id: -[source,js] +[source,console] -------------------------------------------------- DELETE _watcher/watch/my_watch -------------------------------------------------- -// CONSOLE // TEST[setup:my_active_watch] Response: diff --git a/x-pack/docs/en/rest-api/watcher/execute-watch.asciidoc b/x-pack/docs/en/rest-api/watcher/execute-watch.asciidoc index e02e839b550..112c39386fd 100644 --- a/x-pack/docs/en/rest-api/watcher/execute-watch.asciidoc +++ b/x-pack/docs/en/rest-api/watcher/execute-watch.asciidoc @@ -144,16 +144,15 @@ the watch. The following example executes the `my_watch` watch: -[source,js] +[source,console] -------------------------------------------------- POST _watcher/watch/my_watch/_execute -------------------------------------------------- -// CONSOLE // TEST[setup:my_active_watch] The following example shows a comprehensive example of executing the `my-watch` watch: -[source,js] +[source,console] -------------------------------------------------- POST _watcher/watch/my_watch/_execute { @@ -171,8 +170,8 @@ POST _watcher/watch/my_watch/_execute "record_execution" : true <5> } -------------------------------------------------- -// CONSOLE // TEST[setup:my_active_watch] + <1> The triggered and schedule times are provided. 
<2> The input as defined by the watch is ignored and instead the provided input is used as the execution payload. @@ -296,7 +295,7 @@ This is an example of the output: You can set a different execution mode for every action by associating the mode name with the action id: -[source,js] +[source,console] -------------------------------------------------- POST _watcher/watch/my_watch/_execute { @@ -306,13 +305,12 @@ POST _watcher/watch/my_watch/_execute } } -------------------------------------------------- -// CONSOLE // TEST[setup:my_active_watch] You can also associate a single execution mode with all the actions in the watch using `_all` as the action id: -[source,js] +[source,console] -------------------------------------------------- POST _watcher/watch/my_watch/_execute { @@ -321,12 +319,11 @@ POST _watcher/watch/my_watch/_execute } } -------------------------------------------------- -// CONSOLE // TEST[setup:my_active_watch] The following example shows how to execute a watch inline: -[source,js] +[source,console] -------------------------------------------------- POST _watcher/watch/_execute { @@ -357,13 +354,12 @@ POST _watcher/watch/_execute } } -------------------------------------------------- -// CONSOLE All other settings for this API still apply when inlining a watch. In the following snippet, while the inline watch defines a `compare` condition, during the execution this condition will be ignored: -[source,js] +[source,console] -------------------------------------------------- POST _watcher/watch/_execute { @@ -395,4 +391,3 @@ POST _watcher/watch/_execute } } -------------------------------------------------- -// CONSOLE diff --git a/x-pack/docs/en/rest-api/watcher/get-watch.asciidoc b/x-pack/docs/en/rest-api/watcher/get-watch.asciidoc index 5b5f43043ec..f62fc04d435 100644 --- a/x-pack/docs/en/rest-api/watcher/get-watch.asciidoc +++ b/x-pack/docs/en/rest-api/watcher/get-watch.asciidoc @@ -45,11 +45,10 @@ this API. For more information, see The following example gets a watch with `my-watch` id: -[source,js] +[source,console] -------------------------------------------------- GET _watcher/watch/my_watch -------------------------------------------------- -// CONSOLE // TEST[setup:my_active_watch] Response: diff --git a/x-pack/docs/en/rest-api/watcher/put-watch.asciidoc b/x-pack/docs/en/rest-api/watcher/put-watch.asciidoc index 8acbef6c559..bbfcf234923 100644 --- a/x-pack/docs/en/rest-api/watcher/put-watch.asciidoc +++ b/x-pack/docs/en/rest-api/watcher/put-watch.asciidoc @@ -113,7 +113,7 @@ characteristics: * The watch condition checks if any search hits where found. * When found, the watch action sends an email to an administrator. -[source,js] +[source,console] -------------------------------------------------- PUT _watcher/watch/my-watch { @@ -161,7 +161,6 @@ PUT _watcher/watch/my-watch } } -------------------------------------------------- -// CONSOLE When you add a watch you can also define its initial {stack-ov}/how-watcher-works.html#watch-active-state[active state]. You do that diff --git a/x-pack/docs/en/rest-api/watcher/start.asciidoc b/x-pack/docs/en/rest-api/watcher/start.asciidoc index 583a69306af..e4a2441d223 100644 --- a/x-pack/docs/en/rest-api/watcher/start.asciidoc +++ b/x-pack/docs/en/rest-api/watcher/start.asciidoc @@ -39,11 +39,10 @@ information, see {stack-ov}/security-privileges.html[Security privileges]. 
[[watcher-api-start-example]] ==== {api-examples-title} -[source,js] +[source,console] -------------------------------------------------- POST _watcher/_start -------------------------------------------------- -// CONSOLE {watcher} returns the following response if the request is successful: diff --git a/x-pack/docs/en/rest-api/watcher/stats.asciidoc b/x-pack/docs/en/rest-api/watcher/stats.asciidoc index f4c915f5612..a29b24648f7 100644 --- a/x-pack/docs/en/rest-api/watcher/stats.asciidoc +++ b/x-pack/docs/en/rest-api/watcher/stats.asciidoc @@ -80,11 +80,10 @@ the `metric` parameter. The following example calls the `stats` API to retrieve basic metrics: -[source,js] +[source,console] -------------------------------------------------- GET _watcher/stats -------------------------------------------------- -// CONSOLE A successful call returns a JSON structure similar to the following example: @@ -109,19 +108,17 @@ number of concurrent executing watches. The following example specifies the `metric` option as a query string argument and will include the basic metrics and metrics about the current executing watches: -[source,js] +[source,console] -------------------------------------------------- GET _watcher/stats?metric=current_watches -------------------------------------------------- -// CONSOLE The following example specifies the `metric` option as part of the url path: -[source,js] +[source,console] -------------------------------------------------- GET _watcher/stats/current_watches -------------------------------------------------- -// CONSOLE The following snippet shows an example of a successful JSON response that captures a watch in execution: @@ -162,11 +159,10 @@ captures a watch in execution: The following example specifies the `queued_watches` metric option and includes both the basic metrics and the queued watches: -[source,js] +[source,console] -------------------------------------------------- GET _watcher/stats/queued_watches -------------------------------------------------- -// CONSOLE An example of a successful JSON response that captures a watch in execution: diff --git a/x-pack/docs/en/rest-api/watcher/stop.asciidoc b/x-pack/docs/en/rest-api/watcher/stop.asciidoc index d810744ad2f..ae54c2eccf6 100644 --- a/x-pack/docs/en/rest-api/watcher/stop.asciidoc +++ b/x-pack/docs/en/rest-api/watcher/stop.asciidoc @@ -39,11 +39,10 @@ information, see {stack-ov}/security-privileges.html[Security privileges]. 
[[watcher-api-stop-example]] ==== {api-examples-title} -[source,js] +[source,console] -------------------------------------------------- POST _watcher/_stop -------------------------------------------------- -// CONSOLE {watcher} returns the following response if the request is successful: diff --git a/x-pack/docs/en/security/auditing/output-logfile.asciidoc b/x-pack/docs/en/security/auditing/output-logfile.asciidoc index 422d987fe34..e21e2779ba4 100644 --- a/x-pack/docs/en/security/auditing/output-logfile.asciidoc +++ b/x-pack/docs/en/security/auditing/output-logfile.asciidoc @@ -23,7 +23,7 @@ Alternatively, use the {ref}/cluster-update-settings.html[cluster update settings API] to dynamically configure the logger: -[source,js] +[source,console] -------------------------------------------------- PUT /_cluster/settings { @@ -32,7 +32,6 @@ PUT /_cluster/settings } } -------------------------------------------------- -// CONSOLE NOTE: If you overwrite the `log4j2.properties` and do not specify appenders for any of the audit trails, audit events are forwarded to the root appender, which diff --git a/x-pack/docs/en/security/authentication/configuring-active-directory-realm.asciidoc b/x-pack/docs/en/security/authentication/configuring-active-directory-realm.asciidoc index ab08bb4aaae..42378ca10ed 100644 --- a/x-pack/docs/en/security/authentication/configuring-active-directory-realm.asciidoc +++ b/x-pack/docs/en/security/authentication/configuring-active-directory-realm.asciidoc @@ -184,7 +184,8 @@ Directory `admins` group to both the `monitoring` and `user` roles, maps the role. Configured via the role-mapping API: -[source,js] + +[source,console] -------------------------------------------------- PUT /_security/role_mapping/admins { @@ -195,10 +196,10 @@ PUT /_security/role_mapping/admins "enabled": true } -------------------------------------------------- -// CONSOLE + <1> The Active Directory distinguished name (DN) of the `admins` group. -[source,js] +[source,console] -------------------------------------------------- PUT /_security/role_mapping/basic_users { @@ -214,7 +215,7 @@ PUT /_security/role_mapping/basic_users "enabled": true } -------------------------------------------------- -// CONSOLE + <1> The Active Directory distinguished name (DN) of the `users` group. <2> The Active Directory distinguished name (DN) of the user `John Doe`. diff --git a/x-pack/docs/en/security/authentication/configuring-kerberos-realm.asciidoc b/x-pack/docs/en/security/authentication/configuring-kerberos-realm.asciidoc index 766ae3392bc..6a99a4928c7 100644 --- a/x-pack/docs/en/security/authentication/configuring-kerberos-realm.asciidoc +++ b/x-pack/docs/en/security/authentication/configuring-kerberos-realm.asciidoc @@ -151,7 +151,7 @@ users by their `username` field. The following example uses the role mapping API to map `user@REALM` to the roles `monitoring` and `user`: -[source,js] +[source,console] -------------------------------------------------- POST /_security/role_mapping/kerbrolemapping { @@ -162,7 +162,6 @@ POST /_security/role_mapping/kerbrolemapping } } -------------------------------------------------- -// CONSOLE In case you want to support Kerberos cross realm authentication you may need to map roles based on the Kerberos realm name. 
For such scenarios diff --git a/x-pack/docs/en/security/authentication/configuring-ldap-realm.asciidoc b/x-pack/docs/en/security/authentication/configuring-ldap-realm.asciidoc index c7f793d92f3..f7288469abd 100644 --- a/x-pack/docs/en/security/authentication/configuring-ldap-realm.asciidoc +++ b/x-pack/docs/en/security/authentication/configuring-ldap-realm.asciidoc @@ -145,7 +145,8 @@ names. For example, the following mapping configuration maps the LDAP `users` group to the `user` role. Configured via the role-mapping API: -[source,js] + +[source,console] -------------------------------------------------- PUT /_security/role_mapping/admins { @@ -156,10 +157,10 @@ PUT /_security/role_mapping/admins "enabled": true } -------------------------------------------------- -// CONSOLE + <1> The LDAP distinguished name (DN) of the `admins` group. -[source,js] +[source,console] -------------------------------------------------- PUT /_security/role_mapping/basic_users { @@ -170,7 +171,7 @@ PUT /_security/role_mapping/basic_users "enabled": true } -------------------------------------------------- -// CONSOLE + <1> The LDAP distinguished name (DN) of the `users` group. Or, alternatively, configured via the role-mapping file: diff --git a/x-pack/docs/en/security/authentication/configuring-pki-realm.asciidoc b/x-pack/docs/en/security/authentication/configuring-pki-realm.asciidoc index 0964d1795b0..9b4a8f18727 100644 --- a/x-pack/docs/en/security/authentication/configuring-pki-realm.asciidoc +++ b/x-pack/docs/en/security/authentication/configuring-pki-realm.asciidoc @@ -169,7 +169,8 @@ For example, the following mapping configuration maps `John Doe` to the `user` role: Using the role-mapping API: -[source,js] + +[source,console] -------------------------------------------------- PUT /_security/role_mapping/users { @@ -180,7 +181,7 @@ PUT /_security/role_mapping/users "enabled": true } -------------------------------------------------- -// CONSOLE + <1> The distinguished name (DN) of a PKI user. Or, alternatively, configured inside a role-mapping file. The file's path @@ -267,7 +268,7 @@ the following role mapping rule will assign the `role_for_pki1_direct` role to all users that have been authenticated directly by the `pki1` realm, by connecting to {es} instead of going through {kib}: -[source,js] +[source,console] -------------------------------------------------- PUT /_security/role_mapping/direct_pki_only { @@ -287,7 +288,7 @@ PUT /_security/role_mapping/direct_pki_only "enabled": true } -------------------------------------------------- -// CONSOLE + <1> only when this metadata field is set (it is *not* `null`) the user has been authenticated in the delegation scenario. diff --git a/x-pack/docs/en/security/authentication/oidc-guide.asciidoc b/x-pack/docs/en/security/authentication/oidc-guide.asciidoc index 9a73cf07245..032194c365a 100644 --- a/x-pack/docs/en/security/authentication/oidc-guide.asciidoc +++ b/x-pack/docs/en/security/authentication/oidc-guide.asciidoc @@ -422,7 +422,7 @@ to grant roles to users authenticating via OpenID Connect. 
This is an example of a simple role mapping that grants the `kibana_user` role to any user who authenticates against the `oidc1` OpenID Connect realm: -[source,js] +[source,console] -------------------------------------------------- PUT /_security/role_mapping/oidc-kibana { @@ -433,8 +433,6 @@ PUT /_security/role_mapping/oidc-kibana } } -------------------------------------------------- -// CONSOLE -// TEST The user properties that are mapped via the realm configuration are used to process @@ -459,7 +457,7 @@ as per the example below. This mapping grants the {es} `finance_data` role, to any users who authenticate via the `oidc1` realm with the `finance-team` group membership. -[source,js] +[source,console] -------------------------------------------------- PUT /_security/role_mapping/oidc-finance { @@ -471,8 +469,6 @@ PUT /_security/role_mapping/oidc-finance ] } } -------------------------------------------------- -// CONSOLE -// TEST If your users also exist in a repository that can be directly accessed by {es} (such as an LDAP directory) then you can use @@ -599,17 +595,16 @@ that gives them the `manage_oidc` cluster privilege. The use of the `manage_token` cluster privilege will be necessary after the authentication takes place, so that the user can maintain access or be subsequently logged out. -[source,js] +[source,console] -------------------------------------------------- POST /_security/role/facilitator-role { "cluster" : ["manage_oidc", "manage_token"] } -------------------------------------------------- -// CONSOLE -[source,js] +[source,console] -------------------------------------------------- POST /_security/user/facilitator { @@ -617,7 +612,6 @@ POST /_security/user/facilitator "roles" : [ "facilitator-role"] } -------------------------------------------------- -// CONSOLE ==== Handling the authentication flow @@ -629,14 +623,13 @@ authenticate a user with OpenID Connect: OpenID Connect realm in the {es} configuration in the request body. See the {ref}/security-api-oidc-prepare-authentication.html[OIDC Prepare Authentication API] for more details + -[source,js] +[source,console] -------------------------------------------------- POST /_security/oidc/prepare { "realm" : "oidc1" } -------------------------------------------------- -// CONSOLE + . Handle the response to `/_security/oidc/prepare`. The response from {es} will contain 3 parameters: `redirect`, `state`, `nonce`. The custom web application would need to store the values for `state` @@ -653,7 +646,7 @@ POST /_security/oidc/prepare used for handling this, but this parameter is optional. See {ref}/security-api-oidc-authenticate.html[OIDC Authenticate API] for more details + -[source,js] +[source,console] ----------------------------------------------------------------------- POST /_security/oidc/authenticate { @@ -663,7 +656,6 @@ POST /_security/oidc/authenticate "realm" : "oidc1" } ----------------------------------------------------------------------- -// CONSOLE // TEST[catch:unauthorized] + Elasticsearch will validate this and if all is correct will respond with an access token that can be used @@ -672,7 +664,7 @@ Elasticsearch will validate this and if all is correct will respond with an acce . At some point, if necessary, the custom web application can log the user out by using the {ref}/security-api-oidc-logout.html[OIDC Logout API] passing the access token and refresh token as parameters.
For example: + -[source,js] +[source,console] -------------------------------------------------- POST /_security/oidc/logout { @@ -680,7 +672,6 @@ POST /_security/oidc/logout "refresh_token": "vLBPvmAB6KvwvJZr27cS" } -------------------------------------------------- -// CONSOLE // TEST[catch:unauthorized] + If the realm is configured accordingly, this may result in a response with a `redirect` parameter indicating where diff --git a/x-pack/docs/en/security/authentication/saml-guide.asciidoc b/x-pack/docs/en/security/authentication/saml-guide.asciidoc index d2ee6a686d0..48a6b6dbdd7 100644 --- a/x-pack/docs/en/security/authentication/saml-guide.asciidoc +++ b/x-pack/docs/en/security/authentication/saml-guide.asciidoc @@ -631,7 +631,7 @@ to grant roles to users authenticating via SAML. This is an example of a simple role mapping that grants the `kibana_user` role to any user who authenticates against the `saml1` realm: -[source,js] +[source,console] -------------------------------------------------- PUT /_security/role_mapping/saml-kibana { @@ -642,8 +642,6 @@ PUT /_security/role_mapping/saml-kibana } } -------------------------------------------------- -// CONSOLE -// TEST The attributes that are mapped via the realm configuration are used to process @@ -668,7 +666,7 @@ below. This mapping grants the {es} `finance_data` role, to any users who authenticate via the `saml1` realm with the `finance-team` group. -[source,js] +[source,console] -------------------------------------------------- PUT /_security/role_mapping/saml-finance { @@ -680,8 +678,6 @@ PUT /_security/role_mapping/saml-finance ] } } -------------------------------------------------- -// CONSOLE -// TEST If your users also exist in a repository that can be directly accessed by {es} (such as an LDAP directory) then you can use diff --git a/x-pack/docs/en/security/authentication/user-cache.asciidoc b/x-pack/docs/en/security/authentication/user-cache.asciidoc index aa5cd1dc903..e92a49aa9b7 100644 --- a/x-pack/docs/en/security/authentication/user-cache.asciidoc +++ b/x-pack/docs/en/security/authentication/user-cache.asciidoc @@ -22,7 +22,7 @@ You can use the {ref}/security-api-clear-cache.html[clear cache API] to force the eviction of cached users . 
For example, the following request evicts all users from the `ad1` realm: -[source, js] +[source,js] ------------------------------------------------------------ $ curl -XPOST 'http://localhost:9200/_security/realm/ad1/_clear_cache' ------------------------------------------------------------ @@ -30,7 +30,7 @@ $ curl -XPOST 'http://localhost:9200/_security/realm/ad1/_clear_cache' To clear the cache for multiple realms, specify the realms as a comma-separated list: -[source, js] +[source,js] ------------------------------------------------------------ $ curl -XPOST 'http://localhost:9200/_security/realm/ad1,ad2/_clear_cache' ------------------------------------------------------------ diff --git a/x-pack/docs/en/security/authorization/alias-privileges.asciidoc b/x-pack/docs/en/security/authorization/alias-privileges.asciidoc index b9b6d44fd69..860e3b868fa 100644 --- a/x-pack/docs/en/security/authorization/alias-privileges.asciidoc +++ b/x-pack/docs/en/security/authorization/alias-privileges.asciidoc @@ -25,11 +25,10 @@ points to it called `current_year`, and a user with the following role: The user attempts to retrieve a document from `current_year`: -[source,shell] +[source,console] ------------------------------------------------------------------------------- GET /current_year/event/1 ------------------------------------------------------------------------------- -// CONSOLE // TEST[s/^/PUT 2015\n{"aliases": {"current_year": {}}}\nPUT 2015\/event\/1\n{}\n/] The above request gets rejected, although the user has `read` privilege on the @@ -52,7 +51,7 @@ Unlike creating indices, which requires the `create_index` privilege, adding, removing and retrieving aliases requires the `manage` permission. Aliases can be added to an index directly as part of the index creation: -[source,shell] +[source,console] ------------------------------------------------------------------------------- PUT /2015 { @@ -61,11 +60,10 @@ PUT /2015 } } ------------------------------------------------------------------------------- -// CONSOLE or via the dedicated aliases api if the index already exists: -[source,shell] +[source,console] ------------------------------------------------------------------------------- POST /_aliases { @@ -74,7 +72,6 @@ POST /_aliases ] } ------------------------------------------------------------------------------- -// CONSOLE // TEST[s/^/PUT 2015\n/] The above requests both require the `manage` privilege on the alias name as well diff --git a/x-pack/docs/en/security/authorization/managing-roles.asciidoc b/x-pack/docs/en/security/authorization/managing-roles.asciidoc index ab58fcc817c..ee984296f08 100644 --- a/x-pack/docs/en/security/authorization/managing-roles.asciidoc +++ b/x-pack/docs/en/security/authorization/managing-roles.asciidoc @@ -144,7 +144,8 @@ no effect, and will not grant any actions in the ==== Example The following snippet shows an example definition of a `clicks_admin` role: -[source,js] + +[source,console] ----------- POST /_security/role/clicks_admin { @@ -162,7 +163,6 @@ POST /_security/role/clicks_admin ] } ----------- -// CONSOLE Based on the above definition, users owning the `clicks_admin` role can: diff --git a/x-pack/docs/en/security/authorization/mapping-roles.asciidoc b/x-pack/docs/en/security/authorization/mapping-roles.asciidoc index cf8911238a0..c202508caa3 100644 --- a/x-pack/docs/en/security/authorization/mapping-roles.asciidoc +++ b/x-pack/docs/en/security/authorization/mapping-roles.asciidoc @@ -96,7 +96,8 @@ user: <3> The distinguished name of an 
LDAP or Active Directory user. You can use the role-mapping API to define equivalent mappings as follows: -[source,js] + +[source,console] -------------------------------------------------- PUT /_security/role_mapping/admins { @@ -105,9 +106,8 @@ PUT /_security/role_mapping/admins "enabled": true } -------------------------------------------------- -// CONSOLE -[source,js] +[source,console] -------------------------------------------------- PUT /_security/role_mapping/basic_users { @@ -119,7 +119,6 @@ PUT /_security/role_mapping/basic_users "enabled": true } -------------------------------------------------- -// CONSOLE [float] [[pki-role-mapping]] @@ -140,7 +139,7 @@ user: The following example creates equivalent mappings using the API: -[source,js] +[source,console] -------------------------------------------------- PUT /_security/role_mapping/admin_user { @@ -149,9 +148,8 @@ PUT /_security/role_mapping/admin_user "enabled": true } -------------------------------------------------- -// CONSOLE -[source,js] +[source,console] -------------------------------------------------- PUT /_security/role_mapping/basic_user { @@ -160,4 +158,3 @@ PUT /_security/role_mapping/basic_user "enabled": true } -------------------------------------------------- -// CONSOLE diff --git a/x-pack/docs/en/security/authorization/role-templates.asciidoc b/x-pack/docs/en/security/authorization/role-templates.asciidoc index 37cece88c64..d300233bb92 100644 --- a/x-pack/docs/en/security/authorization/role-templates.asciidoc +++ b/x-pack/docs/en/security/authorization/role-templates.asciidoc @@ -12,7 +12,7 @@ authenticated user through the `_user` parameter. For example, the following role query uses a template to insert the username of the current authenticated user: -[source,js] +[source,console] -------------------------------------------------- POST /_security/role/example1 { @@ -31,7 +31,6 @@ POST /_security/role/example1 ] } -------------------------------------------------- -// CONSOLE You can access the following information through the `_user` variable: @@ -49,7 +48,7 @@ You can also access custom user metadata. For example, if you maintain a `group_id` in your user metadata, you can apply document level security based on the `group.id` field in your documents: -[source,js] +[source,console] -------------------------------------------------- POST /_security/role/example2 { @@ -67,5 +66,4 @@ POST /_security/role/example2 } ] } --------------------------------------------------- -// CONSOLE \ No newline at end of file +-------------------------------------------------- \ No newline at end of file diff --git a/x-pack/docs/en/security/ccs-clients-integrations/cross-cluster.asciidoc b/x-pack/docs/en/security/ccs-clients-integrations/cross-cluster.asciidoc index cc617c7db3f..97ef0f0cf12 100644 --- a/x-pack/docs/en/security/ccs-clients-integrations/cross-cluster.asciidoc +++ b/x-pack/docs/en/security/ccs-clients-integrations/cross-cluster.asciidoc @@ -45,7 +45,7 @@ For more information about the `xpack.security.enabled` setting, see to the local cluster: + -- -[source,js] +[source,console] ----------------------------------------------------------- PUT _cluster/settings { @@ -63,7 +63,6 @@ PUT _cluster/settings } } ----------------------------------------------------------- -// CONSOLE -- * On the local cluster, ensure that users are assigned to (at least) one role @@ -78,7 +77,7 @@ to search any index starting with `logs-` in cluster `two` from cluster `one`. 
First, enable cluster `one` to perform cross cluster search on remote cluster `two` by running the following request as the superuser on cluster `one`: -[source,js] +[source,console] ----------------------------------------------------------- PUT _cluster/settings { @@ -87,25 +86,23 @@ PUT _cluster/settings } } ----------------------------------------------------------- -// CONSOLE Next, set up a role called `cluster_two_logs` on both cluster `one` and cluster `two`. On cluster `one`, this role does not need any special privileges: -[source,js] +[source,console] ----------------------------------------------------------- POST /_security/role/cluster_two_logs { } ----------------------------------------------------------- -// CONSOLE On cluster `two`, this role allows the user to query local indices called `logs-` from a remote cluster: -[source,js] +[source,console] ----------------------------------------------------------- POST /_security/role/cluster_two_logs { @@ -123,11 +120,10 @@ POST /_security/role/cluster_two_logs ] } ----------------------------------------------------------- -// CONSOLE Finally, create a user on cluster `one` and apply the `cluster_two_logs` role: -[source,js] +[source,console] ----------------------------------------------------------- POST /_security/user/alice { @@ -138,12 +134,11 @@ POST /_security/user/alice "enabled": true } ----------------------------------------------------------- -// CONSOLE With all of the above setup, the user `alice` is able to search indices in cluster `two` as follows: -[source,js] +[source,console] ----------------------------------------------------------- GET two:logs-2017.04/_search <1> { @@ -152,8 +147,7 @@ GET two:logs-2017.04/_search <1> } } ----------------------------------------------------------- -// CONSOLE // TEST[skip:todo] //TBD: Is there a missing description of the <1> callout above? -include::{kib-repo-dir}/security/cross-cluster-kibana.asciidoc[] +include::{kib-repo-dir}/user/security/cross-cluster-kibana.asciidoc[] diff --git a/x-pack/docs/en/security/using-ip-filtering.asciidoc b/x-pack/docs/en/security/using-ip-filtering.asciidoc index 4e99ec4903d..2041b1ace4c 100644 --- a/x-pack/docs/en/security/using-ip-filtering.asciidoc +++ b/x-pack/docs/en/security/using-ip-filtering.asciidoc @@ -114,7 +114,7 @@ based hosting, it is very hard to know the IP addresses upfront when provisionin a machine. Instead of changing the configuration file and restarting the node, you can use the _Cluster Update Settings API_. For example: -[source,js] +[source,console] -------------------------------------------------- PUT /_cluster/settings { @@ -123,11 +123,10 @@ PUT /_cluster/settings } } -------------------------------------------------- -// CONSOLE You can also dynamically disable filtering completely: -[source,js] +[source,console] -------------------------------------------------- PUT /_cluster/settings { @@ -136,7 +135,6 @@ PUT /_cluster/settings } } -------------------------------------------------- -// CONSOLE // TEST[continued] NOTE: In order to avoid locking yourself out of the cluster, the default bound diff --git a/x-pack/docs/en/watcher/actions.asciidoc b/x-pack/docs/en/watcher/actions.asciidoc index 52bd5d732b6..6697fe59083 100644 --- a/x-pack/docs/en/watcher/actions.asciidoc +++ b/x-pack/docs/en/watcher/actions.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[actions]] == Actions @@ -44,7 +45,7 @@ time frame (`now - throttling period`). 
The following snippet shows a watch for the scenario described above - associating a throttle period with the `email_administrator` action: -[source,js] +[source,console] -------------------------------------------------- PUT _watcher/watch/error_logs_alert { @@ -90,7 +91,7 @@ PUT _watcher/watch/error_logs_alert } } -------------------------------------------------- -// CONSOLE + <1> There will be at least 15 minutes between subsequent `email_administrator` action executions. <2> See <> for more information. @@ -99,7 +100,7 @@ You can also define a throttle period at the watch level. The watch-level throttle period serves as the default throttle period for all of the actions defined in the watch: -[source,js] +[source,console] -------------------------------------------------- PUT _watcher/watch/log_event_watch { @@ -149,7 +150,6 @@ PUT _watcher/watch/log_event_watch } } -------------------------------------------------- -// CONSOLE <1> There will be at least 15 minutes between subsequent action executions (applies to both `email_administrator` and `notify_pager` actions) @@ -175,11 +175,10 @@ When that happens, the action's state changes to `awaits_successful_execution`. To acknowledge an action, you use the {ref}/watcher-api-ack-watch.html[Ack Watch API]: -[source,js] +[source,console] ---------------------------------------------------------------------- POST _watcher/watch//_ack/ ---------------------------------------------------------------------- -// CONSOLE // TEST[skip:https://github.com/elastic/x-plugins/issues/2513] Where `` is the id of the watch and `` is a comma-separated list @@ -191,7 +190,7 @@ of a watch during its execution: image::images/action-throttling.jpg[align="center"] - +[role="xpack"] [[action-foreach]] === Running an action for each element in an array @@ -203,7 +202,7 @@ field to limit the maximum amount of runs that each watch executes. If this limit is reached, the execution is gracefully stopped. If not set, this field defaults to one hundred. -[source,js] +[source,console] -------------------------------------------------- PUT _watcher/watch/log_event_watch { @@ -234,10 +233,10 @@ PUT _watcher/watch/log_event_watch } } -------------------------------------------------- -// CONSOLE <1> The logging statement will be executed for each of the returned search hits. +[role="xpack"] [[action-conditions]] === Adding conditions to actions @@ -248,7 +247,7 @@ on their respective conditions. The following watch would always send an email when hits are found from the input search, but only trigger the `notify_pager` action when there are more than 5 hits in the search result. -[source,js] +[source,console] -------------------------------------------------- PUT _watcher/watch/log_event_watch { @@ -300,7 +299,6 @@ PUT _watcher/watch/log_event_watch } } -------------------------------------------------- -// CONSOLE <1> A `condition` that only applies to the `notify_pager` action, which restricts its execution to when the condition succeeds (at least 5 hits in this case). diff --git a/x-pack/docs/en/watcher/actions/email.asciidoc b/x-pack/docs/en/watcher/actions/email.asciidoc index 2d6b35c29ee..c249aa85572 100644 --- a/x-pack/docs/en/watcher/actions/email.asciidoc +++ b/x-pack/docs/en/watcher/actions/email.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[actions-email]] === Email Action @@ -156,9 +157,9 @@ killed by firewalls or load balancers in-between.
You can use the `reporting` attachment type in an `email` action to automatically generate a Kibana report and distribute it via email. -include::{kib-repo-dir}/reporting/watch-example.asciidoc[] +include::{kib-repo-dir}/user/reporting/watch-example.asciidoc[] -include::{kib-repo-dir}/reporting/report-intervals.asciidoc[] +include::{kib-repo-dir}/user/reporting/report-intervals.asciidoc[] For more information, see {kibana-ref}/automating-report-generation.html[Automating Report Generation]. diff --git a/x-pack/docs/en/watcher/actions/index.asciidoc b/x-pack/docs/en/watcher/actions/index.asciidoc index 34fdad7c50d..faaf80365aa 100644 --- a/x-pack/docs/en/watcher/actions/index.asciidoc +++ b/x-pack/docs/en/watcher/actions/index.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[actions-index]] === Index Action diff --git a/x-pack/docs/en/watcher/actions/jira.asciidoc b/x-pack/docs/en/watcher/actions/jira.asciidoc index 4608ee6ab1a..514618d5b57 100644 --- a/x-pack/docs/en/watcher/actions/jira.asciidoc +++ b/x-pack/docs/en/watcher/actions/jira.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[actions-jira]] === Jira Action diff --git a/x-pack/docs/en/watcher/actions/logging.asciidoc b/x-pack/docs/en/watcher/actions/logging.asciidoc index a8a4454c377..e1b6d31d86d 100644 --- a/x-pack/docs/en/watcher/actions/logging.asciidoc +++ b/x-pack/docs/en/watcher/actions/logging.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[actions-logging]] === Logging Action diff --git a/x-pack/docs/en/watcher/actions/pagerduty.asciidoc b/x-pack/docs/en/watcher/actions/pagerduty.asciidoc index 1cd9132a57b..2d9d091882f 100644 --- a/x-pack/docs/en/watcher/actions/pagerduty.asciidoc +++ b/x-pack/docs/en/watcher/actions/pagerduty.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[actions-pagerduty]] === PagerDuty Action diff --git a/x-pack/docs/en/watcher/actions/slack.asciidoc b/x-pack/docs/en/watcher/actions/slack.asciidoc index 0e9177c604d..03dce981375 100644 --- a/x-pack/docs/en/watcher/actions/slack.asciidoc +++ b/x-pack/docs/en/watcher/actions/slack.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[actions-slack]] === Slack Action diff --git a/x-pack/docs/en/watcher/actions/webhook.asciidoc b/x-pack/docs/en/watcher/actions/webhook.asciidoc index 1b7c482d2c4..ce326f74ae3 100644 --- a/x-pack/docs/en/watcher/actions/webhook.asciidoc +++ b/x-pack/docs/en/watcher/actions/webhook.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[actions-webhook]] === Webhook Action diff --git a/x-pack/docs/en/watcher/condition.asciidoc b/x-pack/docs/en/watcher/condition.asciidoc index 1935b4dc31d..f3803ea713b 100644 --- a/x-pack/docs/en/watcher/condition.asciidoc +++ b/x-pack/docs/en/watcher/condition.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[condition]] == Conditions diff --git a/x-pack/docs/en/watcher/condition/always.asciidoc b/x-pack/docs/en/watcher/condition/always.asciidoc index c2eb37be52c..8bf60d0e77b 100644 --- a/x-pack/docs/en/watcher/condition/always.asciidoc +++ b/x-pack/docs/en/watcher/condition/always.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[condition-always]] === Always Condition diff --git a/x-pack/docs/en/watcher/condition/array-compare.asciidoc b/x-pack/docs/en/watcher/condition/array-compare.asciidoc index e82eaf53840..1e9d3113e8c 100644 --- a/x-pack/docs/en/watcher/condition/array-compare.asciidoc +++ b/x-pack/docs/en/watcher/condition/array-compare.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[condition-array-compare]] === Array Compare Condition diff --git a/x-pack/docs/en/watcher/condition/compare.asciidoc b/x-pack/docs/en/watcher/condition/compare.asciidoc 
index b275776a2e5..b3938ba9758 100644 --- a/x-pack/docs/en/watcher/condition/compare.asciidoc +++ b/x-pack/docs/en/watcher/condition/compare.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[condition-compare]] === Compare Condition diff --git a/x-pack/docs/en/watcher/condition/never.asciidoc b/x-pack/docs/en/watcher/condition/never.asciidoc index b8cad0b8c04..84d8a74a261 100644 --- a/x-pack/docs/en/watcher/condition/never.asciidoc +++ b/x-pack/docs/en/watcher/condition/never.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[condition-never]] === Never Condition diff --git a/x-pack/docs/en/watcher/condition/script.asciidoc b/x-pack/docs/en/watcher/condition/script.asciidoc index ee6a9531bf7..4aabfc9144c 100644 --- a/x-pack/docs/en/watcher/condition/script.asciidoc +++ b/x-pack/docs/en/watcher/condition/script.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[condition-script]] === Script Condition diff --git a/x-pack/docs/en/watcher/customizing-watches.asciidoc b/x-pack/docs/en/watcher/customizing-watches.asciidoc index dac78450e6a..622a96a64b4 100644 --- a/x-pack/docs/en/watcher/customizing-watches.asciidoc +++ b/x-pack/docs/en/watcher/customizing-watches.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[customizing-watches]] == Customizing Watches diff --git a/x-pack/docs/en/watcher/encrypting-data.asciidoc b/x-pack/docs/en/watcher/encrypting-data.asciidoc index 66138b54efb..c0bcbec74b1 100644 --- a/x-pack/docs/en/watcher/encrypting-data.asciidoc +++ b/x-pack/docs/en/watcher/encrypting-data.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[encrypting-data]] == Encrypting Sensitive Data in {watcher} diff --git a/x-pack/docs/en/watcher/example-watches.asciidoc b/x-pack/docs/en/watcher/example-watches.asciidoc index 2d747caba5c..90807d4fc03 100644 --- a/x-pack/docs/en/watcher/example-watches.asciidoc +++ b/x-pack/docs/en/watcher/example-watches.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[example-watches]] == Example Watches The following examples show how to set up watches to: diff --git a/x-pack/docs/en/watcher/example-watches/example-watch-clusterstatus.asciidoc b/x-pack/docs/en/watcher/example-watches/example-watch-clusterstatus.asciidoc index 5506f206b45..02133303e60 100644 --- a/x-pack/docs/en/watcher/example-watches/example-watch-clusterstatus.asciidoc +++ b/x-pack/docs/en/watcher/example-watches/example-watch-clusterstatus.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[watch-cluster-status]] === Watching the Status of an Elasticsearch Cluster @@ -22,7 +23,7 @@ The watch <> gets the data that you want to evaluate. The simplest way to define a schedule is to specify an interval. For example, the following schedule runs every 10 seconds: -[source,js] +[source,console] -------------------------------------------------- PUT _watcher/watch/cluster_health_watch { @@ -31,7 +32,7 @@ PUT _watcher/watch/cluster_health_watch } } -------------------------------------------------- -// CONSOLE + <1> Schedules are typically configured to run less frequently. This example sets the interval to 10 seconds to you can easily see the watches being triggered. 
Since this watch runs so frequently, don't forget to <> @@ -40,17 +41,16 @@ PUT _watcher/watch/cluster_health_watch To get the status of your cluster, you can call the Elasticsearch {ref}//cluster-health.html[cluster health] API: -[source,js] +[source,console] -------------------------------------------------- GET _cluster/health?pretty -------------------------------------------------- -// CONSOLE // TEST[continued] To load the health status into your watch, you simply add an <> that calls the cluster health API: -[source,js] +[source,console] -------------------------------------------------- PUT _watcher/watch/cluster_health_watch { @@ -68,11 +68,10 @@ PUT _watcher/watch/cluster_health_watch } } -------------------------------------------------- -// CONSOLE If you're using Security, then you'll also need to supply some authentication credentials as part of the watch configuration: -[source,js] +[source,console] -------------------------------------------------- PUT _watcher/watch/cluster_health_watch { @@ -96,7 +95,6 @@ PUT _watcher/watch/cluster_health_watch } } -------------------------------------------------- -// CONSOLE It would be a good idea to create a user with the minimum privileges required for use with such a watch configuration. @@ -109,7 +107,7 @@ as part of the `watch_record` each time the watch executes. For example, the following request retrieves the last ten watch records from the watch history: -[source,js] +[source,console] -------------------------------------------------- GET .watcher-history*/_search { @@ -118,7 +116,6 @@ GET .watcher-history*/_search ] } -------------------------------------------------- -// CONSOLE // TEST[continued] [float] @@ -132,7 +129,7 @@ status. For example, you could add a condition to check to see if the status is RED. -[source,js] +[source,console] -------------------------------------------------- PUT _watcher/watch/cluster_health_watch { @@ -155,7 +152,7 @@ PUT _watcher/watch/cluster_health_watch } } -------------------------------------------------- -// CONSOLE + <1> Schedules are typically configured to run less frequently. This example sets the interval to 10 seconds to you can easily see the watches being triggered. @@ -164,7 +161,7 @@ as part of the `watch_record` each time the watch executes. To check to see if the condition was met, you can run the following query. -[source,js] +[source,console] ------------------------------------------------------ GET .watcher-history*/_search?pretty { @@ -173,7 +170,6 @@ GET .watcher-history*/_search?pretty } } ------------------------------------------------------ -// CONSOLE // TEST[continued] [float] @@ -189,7 +185,7 @@ Elasticsearch index or log when the watch condition is met. For example, you could add an action to index the cluster status information when the status is RED. -[source,js] +[source,console] -------------------------------------------------- PUT _watcher/watch/cluster_health_watch { @@ -221,7 +217,6 @@ PUT _watcher/watch/cluster_health_watch } } -------------------------------------------------- -// CONSOLE For {watcher} to send email, you must configure an email account in your `elasticsearch.yml` configuration file and restart Elasticsearch. To add an email @@ -256,7 +251,7 @@ NOTE: If you have advanced security options enabled for your email account, You can check the watch history or the `status_index` to see that the action was performed. 
-[source,js] +[source,console] ------------------------------------------------------- GET .watcher-history*/_search?pretty { @@ -265,7 +260,6 @@ GET .watcher-history*/_search?pretty } } ------------------------------------------------------- -// CONSOLE // TEST[continued] [float] @@ -278,9 +272,8 @@ indefinitely. To remove the watch, use the {ref}/watcher-api-delete-watch.html[DELETE watch API]: -[source,js] +[source,console] ------------------------------------------------------- DELETE _watcher/watch/cluster_health_watch ------------------------------------------------------- -// CONSOLE // TEST[continued] diff --git a/x-pack/docs/en/watcher/example-watches/example-watch-meetupdata.asciidoc b/x-pack/docs/en/watcher/example-watches/example-watch-meetupdata.asciidoc index 1f6c951e504..cdb4362f5b3 100644 --- a/x-pack/docs/en/watcher/example-watches/example-watch-meetupdata.asciidoc +++ b/x-pack/docs/en/watcher/example-watches/example-watch-meetupdata.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[watching-meetup-data]] === Watching Event Data @@ -192,7 +193,8 @@ NOTE: To enable Watcher to send emails, you must configure an email account in ` The complete watch looks like this: -[source,js] + +[source,console] -------------------------------------------------- PUT _watcher/watch/meetup { @@ -288,7 +290,6 @@ PUT _watcher/watch/meetup } } -------------------------------------------------- -// CONSOLE <1> The email body can include Mustache templates to reference data in the watch payload. By default,it will be <> to block dangerous content. <2> Replace the `from` address with the email address you configured in `elasticsearch.yml`. @@ -298,9 +299,8 @@ PUT _watcher/watch/meetup Now that you've created your watch, you can use the {ref}/watcher-api-execute-watch.html[`_execute` API] to run it without waiting for the schedule to trigger execution: -[source,js] +[source,console] -------------------------------------------------- POST _watcher/watch/meetup/_execute -------------------------------------------------- -// CONSOLE // TEST[continued] diff --git a/x-pack/docs/en/watcher/example-watches/watching-time-series-data.asciidoc b/x-pack/docs/en/watcher/example-watches/watching-time-series-data.asciidoc index 74074d2ac3d..fa389c9ecf0 100644 --- a/x-pack/docs/en/watcher/example-watches/watching-time-series-data.asciidoc +++ b/x-pack/docs/en/watcher/example-watches/watching-time-series-data.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[watching-time-series-data]] === Watching Time Series Data @@ -155,7 +156,7 @@ specify which one you want to send the email with. For more information, see The complete watch looks like this: -[source,js] +[source,console] -------------------------------------------------- PUT _watcher/watch/rss_watch { @@ -203,8 +204,8 @@ PUT _watcher/watch/rss_watch } } -------------------------------------------------- -// CONSOLE // TEST[s/"id" : "threshold_hits"/"source": "return ctx.payload.hits.total.value > params.threshold"/] + <1> Replace `username@example.org` with your email address to receive notifications. 
@@ -213,7 +214,7 @@ PUT _watcher/watch/rss_watch To execute a watch immediately (without waiting for the schedule to trigger), use the {ref}/watcher-api-execute-watch.html[`_execute` API]: -[source,js] +[source,console] -------------------------------------------------- POST _watcher/watch/rss_watch/_execute { @@ -224,6 +225,5 @@ POST _watcher/watch/rss_watch/_execute "record_execution" : true } -------------------------------------------------- -// CONSOLE // TEST[continued] ================================================= diff --git a/x-pack/docs/en/watcher/getting-started.asciidoc b/x-pack/docs/en/watcher/getting-started.asciidoc index 8556f6b564b..8b95f905c09 100644 --- a/x-pack/docs/en/watcher/getting-started.asciidoc +++ b/x-pack/docs/en/watcher/getting-started.asciidoc @@ -1,9 +1,10 @@ +[role="xpack"] [[watcher-getting-started]] -== Getting Started with {watcher} +== Getting started with {watcher} -By default, when you install {es} and {kib}, {xpack} is installed and the -{watcher} is enabled. You cannot use {watcher} with the free basic license, but -you can try all of the {xpack} features with a <>. +TIP: To complete these steps, you must obtain a license that includes the +{alert-features}. For more information about Elastic license levels, see +https://www.elastic.co/subscriptions and <>. [[watch-log-data]] To set up a watch to start sending alerts: @@ -26,7 +27,7 @@ watch, you could use an {xpack-ref}/trigger-schedule.html#schedule-interval[inte {xpack-ref}/input-search.html[search] input. For example, the following Watch searches the `logs` index for errors every 10 seconds: -[source,js] +[source,console] ------------------------------------------------------------ PUT _watcher/watch/log_error_watch { @@ -47,7 +48,7 @@ PUT _watcher/watch/log_error_watch } } ------------------------------------------------------------ -// CONSOLE + <1> Schedules are typically configured to run less frequently. This example sets the interval to 10 seconds so you can easily see the watches being triggered. Since this watch runs so frequently, don't forget to <> @@ -60,7 +61,7 @@ into the watch payload. For example, the following request retrieves the last ten watch executions (watch records) from the watch history: -[source,js] +[source,console] ------------------------------------------------------------ GET .watcher-history*/_search?pretty { @@ -69,7 +70,6 @@ GET .watcher-history*/_search?pretty ] } ------------------------------------------------------------ -// CONSOLE // TEST[continued] [float] @@ -84,7 +84,7 @@ found. For example, the following compare condition simply checks to see if the search input returned any hits. -[source,js] +[source,console] -------------------------------------------------- PUT _watcher/watch/log_error_watch { @@ -106,7 +106,7 @@ PUT _watcher/watch/log_error_watch } } -------------------------------------------------- -// CONSOLE + <1> The {xpack-ref}/condition-compare.html[compare] condition lets you easily compare against values in the execution context. @@ -114,7 +114,7 @@ For this compare condition to evaluate to `true`, you need to add an event to the `logs` index that contains an error. 
For example, the following request adds a 404 error to the `logs` index: -[source,js] +[source,console] -------------------------------------------------- POST logs/event { @@ -124,7 +124,6 @@ POST logs/event "message" : "Error: File not found" } -------------------------------------------------- -// CONSOLE // TEST[continued] Once you add this event, the next time the watch executes its condition will @@ -132,7 +131,7 @@ evaluate to `true`. The condition result is recorded as part of the `watch_record` each time the watch executes, so you can verify whether or not the condition was met by searching the watch history: -[source,js] +[source,console] -------------------------------------------------- GET .watcher-history*/_search?pretty { @@ -146,7 +145,6 @@ GET .watcher-history*/_search?pretty } } -------------------------------------------------- -// CONSOLE // TEST[continued] [float] @@ -163,7 +161,7 @@ Elasticsearch log files. For example, the following action writes a message to the Elasticsearch log when an error is detected. -[source,js] +[source,console] -------------------------------------------------- PUT _watcher/watch/log_error_watch { @@ -192,7 +190,6 @@ PUT _watcher/watch/log_error_watch } } -------------------------------------------------- -// CONSOLE [float] [[log-delete]] @@ -205,11 +202,10 @@ log file. To remove the watch, use the {ref}/watcher-api-delete-watch.html[DELETE watch API]: -[source,js] +[source,console] -------------------------------------------------- DELETE _watcher/watch/log_error_watch -------------------------------------------------- -// CONSOLE // TEST[continued] [float] diff --git a/x-pack/docs/en/watcher/gs-index.asciidoc b/x-pack/docs/en/watcher/gs-index.asciidoc index c26789ca423..b8dbdc36b9a 100644 --- a/x-pack/docs/en/watcher/gs-index.asciidoc +++ b/x-pack/docs/en/watcher/gs-index.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[xpack-alerting]] = Alerting on Cluster and Index Events diff --git a/x-pack/docs/en/watcher/how-watcher-works.asciidoc b/x-pack/docs/en/watcher/how-watcher-works.asciidoc index 5ecc5b41ec6..d339228dba0 100644 --- a/x-pack/docs/en/watcher/how-watcher-works.asciidoc +++ b/x-pack/docs/en/watcher/how-watcher-works.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[how-watcher-works]] == How {watcher} Works @@ -46,7 +47,7 @@ For example, the following snippet shows a {ref}/watcher-api-put-watch.html[Put Watch] request that defines a watch that looks for log error events: -[source,js] +[source,console] -------------------------------------------------- PUT _watcher/watch/log_errors { @@ -110,7 +111,7 @@ PUT _watcher/watch/log_errors } } -------------------------------------------------- -// CONSOLE + <1> Metadata - You can attach optional static metadata to a watch. <2> Trigger - This schedule trigger executes the watch every 5 minutes. <3> Input - This input searches for errors in the `log-events` index and @@ -152,14 +153,13 @@ dedicated watcher nodes by using shard allocation filtering. You could configure nodes with a dedicated `node.attr.role: watcher` property and then configure the `.watches` index like this: -[source,js] +[source,console] ------------------------ PUT .watches/_settings { "index.routing.allocation.include.role": "watcher" } ------------------------ -// CONSOLE // TEST[skip:indexes don't assign] When the {watcher} service is stopped, the scheduler stops with it. 
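Aside: the {watcher} service referred to in that last context line can be stopped and restarted through its dedicated APIs, and its current state inspected with the stats API. A rough, illustrative sketch:

[source,console]
--------------------------------------------------
POST _watcher/_stop

POST _watcher/_start

GET _watcher/stats
--------------------------------------------------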
Trigger diff --git a/x-pack/docs/en/watcher/index.asciidoc b/x-pack/docs/en/watcher/index.asciidoc index 63d6b8ad9ea..f1d963fa546 100644 --- a/x-pack/docs/en/watcher/index.asciidoc +++ b/x-pack/docs/en/watcher/index.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[xpack-alerting]] = Alerting on cluster and index events diff --git a/x-pack/docs/en/watcher/input.asciidoc b/x-pack/docs/en/watcher/input.asciidoc index d74f5cd80f1..fd1f017eeb7 100644 --- a/x-pack/docs/en/watcher/input.asciidoc +++ b/x-pack/docs/en/watcher/input.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[input]] == Inputs diff --git a/x-pack/docs/en/watcher/input/chain.asciidoc b/x-pack/docs/en/watcher/input/chain.asciidoc index 9898880a9a7..87d4944d88f 100644 --- a/x-pack/docs/en/watcher/input/chain.asciidoc +++ b/x-pack/docs/en/watcher/input/chain.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[input-chain]] === Chain Input diff --git a/x-pack/docs/en/watcher/input/http.asciidoc b/x-pack/docs/en/watcher/input/http.asciidoc index 79d37d14a1b..e9d71ead887 100644 --- a/x-pack/docs/en/watcher/input/http.asciidoc +++ b/x-pack/docs/en/watcher/input/http.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[input-http]] === HTTP Input diff --git a/x-pack/docs/en/watcher/input/search.asciidoc b/x-pack/docs/en/watcher/input/search.asciidoc index 1d21de7b8c1..729a7c7d56a 100644 --- a/x-pack/docs/en/watcher/input/search.asciidoc +++ b/x-pack/docs/en/watcher/input/search.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[input-search]] === Search Input diff --git a/x-pack/docs/en/watcher/input/simple.asciidoc b/x-pack/docs/en/watcher/input/simple.asciidoc index c756a4e5403..d92f01e1637 100644 --- a/x-pack/docs/en/watcher/input/simple.asciidoc +++ b/x-pack/docs/en/watcher/input/simple.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[input-simple]] === Simple Input diff --git a/x-pack/docs/en/watcher/managing-watches.asciidoc b/x-pack/docs/en/watcher/managing-watches.asciidoc index a155132d5e4..75939a0be45 100644 --- a/x-pack/docs/en/watcher/managing-watches.asciidoc +++ b/x-pack/docs/en/watcher/managing-watches.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[managing-watches]] == Managing Watches @@ -25,12 +26,11 @@ IMPORTANT: You can only perform read actions on the `.watches` index. 
You must For example, the following returns the first 100 watches: -[source,js] +[source,console] -------------------------------------------------- GET .watches/_search { "size" : 100 } -------------------------------------------------- -// CONSOLE // TEST[setup:my_active_watch] diff --git a/x-pack/docs/en/watcher/release-notes.asciidoc b/x-pack/docs/en/watcher/release-notes.asciidoc index 5875458a154..af45c4d9c1d 100644 --- a/x-pack/docs/en/watcher/release-notes.asciidoc +++ b/x-pack/docs/en/watcher/release-notes.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[watcher-release-notes]] == Watcher Release Notes (Pre-5.0) diff --git a/x-pack/docs/en/watcher/transform.asciidoc b/x-pack/docs/en/watcher/transform.asciidoc index 4fc6ea66b17..18702934d3e 100644 --- a/x-pack/docs/en/watcher/transform.asciidoc +++ b/x-pack/docs/en/watcher/transform.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[transform]] == Transforms diff --git a/x-pack/docs/en/watcher/transform/chain.asciidoc b/x-pack/docs/en/watcher/transform/chain.asciidoc index 4f7fad37256..882db6986e0 100644 --- a/x-pack/docs/en/watcher/transform/chain.asciidoc +++ b/x-pack/docs/en/watcher/transform/chain.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[transform-chain]] === Chain Transform diff --git a/x-pack/docs/en/watcher/transform/script.asciidoc b/x-pack/docs/en/watcher/transform/script.asciidoc index 9a1377eb5ea..7d93a87a941 100644 --- a/x-pack/docs/en/watcher/transform/script.asciidoc +++ b/x-pack/docs/en/watcher/transform/script.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[transform-script]] === Script Transform diff --git a/x-pack/docs/en/watcher/transform/search.asciidoc b/x-pack/docs/en/watcher/transform/search.asciidoc index 439164429c0..4fc203bc3d3 100644 --- a/x-pack/docs/en/watcher/transform/search.asciidoc +++ b/x-pack/docs/en/watcher/transform/search.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[transform-search]] === Search Transform diff --git a/x-pack/docs/en/watcher/trigger.asciidoc b/x-pack/docs/en/watcher/trigger.asciidoc index ee52dbba3bd..222ec9e1036 100644 --- a/x-pack/docs/en/watcher/trigger.asciidoc +++ b/x-pack/docs/en/watcher/trigger.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[trigger]] == Triggers diff --git a/x-pack/docs/en/watcher/trigger/schedule.asciidoc b/x-pack/docs/en/watcher/trigger/schedule.asciidoc index 7cd38c5fc9b..a8a5fb4b32e 100644 --- a/x-pack/docs/en/watcher/trigger/schedule.asciidoc +++ b/x-pack/docs/en/watcher/trigger/schedule.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[trigger-schedule]] === Schedule Trigger diff --git a/x-pack/docs/en/watcher/trigger/schedule/cron.asciidoc b/x-pack/docs/en/watcher/trigger/schedule/cron.asciidoc index 2e5b2feb8b3..a5d05f8666d 100644 --- a/x-pack/docs/en/watcher/trigger/schedule/cron.asciidoc +++ b/x-pack/docs/en/watcher/trigger/schedule/cron.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[schedule-cron]] ==== `cron` schedule diff --git a/x-pack/docs/en/watcher/trigger/schedule/daily.asciidoc b/x-pack/docs/en/watcher/trigger/schedule/daily.asciidoc index e729335d59b..39dbbc7f4fb 100644 --- a/x-pack/docs/en/watcher/trigger/schedule/daily.asciidoc +++ b/x-pack/docs/en/watcher/trigger/schedule/daily.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[schedule-daily]] ==== Daily Schedule diff --git a/x-pack/docs/en/watcher/trigger/schedule/hourly.asciidoc b/x-pack/docs/en/watcher/trigger/schedule/hourly.asciidoc index 9ec750eebcd..2e462667977 100644 --- a/x-pack/docs/en/watcher/trigger/schedule/hourly.asciidoc +++ b/x-pack/docs/en/watcher/trigger/schedule/hourly.asciidoc @@ -1,3 +1,4 @@ 
+[role="xpack"] [[schedule-hourly]] ==== Hourly Schedule diff --git a/x-pack/docs/en/watcher/trigger/schedule/interval.asciidoc b/x-pack/docs/en/watcher/trigger/schedule/interval.asciidoc index e534181ec0c..14d22a60a33 100644 --- a/x-pack/docs/en/watcher/trigger/schedule/interval.asciidoc +++ b/x-pack/docs/en/watcher/trigger/schedule/interval.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[schedule-interval]] ==== Interval Schedule diff --git a/x-pack/docs/en/watcher/trigger/schedule/monthly.asciidoc b/x-pack/docs/en/watcher/trigger/schedule/monthly.asciidoc index d2cfe409992..5dae3a96302 100644 --- a/x-pack/docs/en/watcher/trigger/schedule/monthly.asciidoc +++ b/x-pack/docs/en/watcher/trigger/schedule/monthly.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[schedule-monthly]] ==== Monthly Schedule diff --git a/x-pack/docs/en/watcher/trigger/schedule/weekly.asciidoc b/x-pack/docs/en/watcher/trigger/schedule/weekly.asciidoc index d6a403cb125..ddcb4932631 100644 --- a/x-pack/docs/en/watcher/trigger/schedule/weekly.asciidoc +++ b/x-pack/docs/en/watcher/trigger/schedule/weekly.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[schedule-weekly]] ==== Weekly Schedule diff --git a/x-pack/docs/en/watcher/trigger/schedule/yearly.asciidoc b/x-pack/docs/en/watcher/trigger/schedule/yearly.asciidoc index d11cc5d0727..0a12eb0a8ca 100644 --- a/x-pack/docs/en/watcher/trigger/schedule/yearly.asciidoc +++ b/x-pack/docs/en/watcher/trigger/schedule/yearly.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[schedule-yearly]] ==== Yearly Schedule diff --git a/x-pack/docs/en/watcher/troubleshooting.asciidoc b/x-pack/docs/en/watcher/troubleshooting.asciidoc index 0e6e463e35f..e6c193896f4 100644 --- a/x-pack/docs/en/watcher/troubleshooting.asciidoc +++ b/x-pack/docs/en/watcher/troubleshooting.asciidoc @@ -14,11 +14,10 @@ If you get the _Dynamic Mapping is Disabled_ error when you try to add a watch, verify that the index mappings for the `.watches` index are available. You can do that by submitting the following request: -[source,js] +[source,console] -------------------------------------------------- GET .watches/_mapping -------------------------------------------------- -// CONSOLE // TEST[setup:my_active_watch] If the index mappings are missing, follow these steps to restore the correct @@ -30,11 +29,10 @@ mappings: . Delete the `.watches` index: + -- -[source,js] +[source,console] -------------------------------------------------- DELETE .watches -------------------------------------------------- -// CONSOLE // TEST[skip:index deletion] -- . 
Disable direct access to the `.watches` index: diff --git a/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/StubAggregatorFactory.java b/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/StubAggregatorFactory.java index edcd66ab422..ace86ddea6b 100644 --- a/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/StubAggregatorFactory.java +++ b/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/StubAggregatorFactory.java @@ -9,6 +9,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.MockBigArrays; import org.elasticsearch.common.util.MockPageCacheRecycler; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; @@ -30,23 +31,26 @@ public class StubAggregatorFactory extends AggregatorFactory { private final Aggregator aggregator; - private StubAggregatorFactory(SearchContext context, Aggregator aggregator) throws IOException { - super("_name", context, null, new AggregatorFactories.Builder(), Collections.emptyMap()); + private StubAggregatorFactory(QueryShardContext queryShardContext, Aggregator aggregator) throws IOException { + super("_name", queryShardContext, null, new AggregatorFactories.Builder(), Collections.emptyMap()); this.aggregator = aggregator; } @Override - protected Aggregator createInternal(Aggregator parent, boolean collectsFromSingleBucket, List list, Map metaData) throws IOException { + protected Aggregator createInternal(SearchContext searchContext, + Aggregator parent, + boolean collectsFromSingleBucket, + List list, Map metaData) throws IOException { return aggregator; } public static StubAggregatorFactory createInstance() throws IOException { BigArrays bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()); - SearchContext searchContext = mock(SearchContext.class); - when(searchContext.bigArrays()).thenReturn(bigArrays); + QueryShardContext queryShardContext = mock(QueryShardContext.class); + when(queryShardContext.bigArrays()).thenReturn(bigArrays); Aggregator aggregator = mock(Aggregator.class); - return new StubAggregatorFactory(searchContext, aggregator); + return new StubAggregatorFactory(queryShardContext, aggregator); } } diff --git a/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/cumulativecardinality/CumulativeCardinalityAggregatorTests.java b/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/cumulativecardinality/CumulativeCardinalityAggregatorTests.java index d4d47a5bdfe..2e569f19c37 100644 --- a/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/cumulativecardinality/CumulativeCardinalityAggregatorTests.java +++ b/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/cumulativecardinality/CumulativeCardinalityAggregatorTests.java @@ -21,6 +21,7 @@ import org.elasticsearch.common.time.DateFormatters; import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.NumberFieldMapper; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregationExecutionException; import 
org.elasticsearch.search.aggregations.AggregatorFactories; @@ -43,7 +44,6 @@ import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import org.elasticsearch.search.aggregations.support.ValuesSourceType; -import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.xpack.analytics.StubAggregatorFactory; import java.io.IOException; @@ -122,7 +122,7 @@ public class CumulativeCardinalityAggregatorTests extends AggregatorTestCase { Set aggBuilders = new HashSet<>(); aggBuilders.add(new CumulativeCardinalityPipelineAggregationBuilder("cumulative_card", "sum")); AggregatorFactory parent = new HistogramAggregatorFactory("name", valuesSourceConfig, 0.0d, 0.0d, - mock(InternalOrder.class), false, 0L, 0.0d, 1.0d, mock(SearchContext.class), null, + mock(InternalOrder.class), false, 0L, 0.0d, 1.0d, mock(QueryShardContext.class), null, new AggregatorFactories.Builder(), Collections.emptyMap()); CumulativeCardinalityPipelineAggregationBuilder builder = new CumulativeCardinalityPipelineAggregationBuilder("name", "valid"); @@ -133,7 +133,7 @@ public class CumulativeCardinalityAggregatorTests extends AggregatorTestCase { aggBuilders.add(new CumulativeCardinalityPipelineAggregationBuilder("cumulative_card", "sum")); parent = new DateHistogramAggregatorFactory("name", valuesSourceConfig, 0L, mock(InternalOrder.class), false, 0L, mock(Rounding.class), mock(Rounding.class), - mock(ExtendedBounds.class), mock(SearchContext.class), mock(AggregatorFactory.class), + mock(ExtendedBounds.class), mock(QueryShardContext.class), mock(AggregatorFactory.class), new AggregatorFactories.Builder(), Collections.emptyMap()); builder = new CumulativeCardinalityPipelineAggregationBuilder("name", "valid"); builder.validate(parent, Collections.emptySet(), aggBuilders); @@ -145,7 +145,7 @@ public class CumulativeCardinalityAggregatorTests extends AggregatorTestCase { AutoDateHistogramAggregationBuilder.RoundingInfo[] roundings = new AutoDateHistogramAggregationBuilder.RoundingInfo[1]; parent = new AutoDateHistogramAggregatorFactory("name", numericVS, 1, roundings, - mock(SearchContext.class), null, new AggregatorFactories.Builder(), Collections.emptyMap()); + mock(QueryShardContext.class), null, new AggregatorFactories.Builder(), Collections.emptyMap()); builder = new CumulativeCardinalityPipelineAggregationBuilder("name", "valid"); builder.validate(parent, Collections.emptySet(), aggBuilders); @@ -226,32 +226,4 @@ public class CumulativeCardinalityAggregatorTests extends AggregatorTestCase { private static long asLong(String dateTime) { return DateFormatters.from(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(dateTime)).toInstant().toEpochMilli(); } - - - private static AggregatorFactory getRandomSequentiallyOrderedParentAgg() throws IOException { - AggregatorFactory factory; - ValuesSourceConfig valuesSourceConfig = new ValuesSourceConfig<>(ValuesSourceType.NUMERIC); - switch (randomIntBetween(0, 2)) { - case 0: - factory = new HistogramAggregatorFactory("name", valuesSourceConfig, 0.0d, 0.0d, - mock(InternalOrder.class), false, 0L, 0.0d, 1.0d, mock(SearchContext.class), null, - new AggregatorFactories.Builder(), Collections.emptyMap()); - break; - case 1: - factory = new DateHistogramAggregatorFactory("name", valuesSourceConfig, 0L, - mock(InternalOrder.class), false, 0L, mock(Rounding.class), mock(Rounding.class), - mock(ExtendedBounds.class), 
mock(SearchContext.class), mock(AggregatorFactory.class), - new AggregatorFactories.Builder(), Collections.emptyMap()); - break; - case 2: - default: - ValuesSourceConfig numericVS = new ValuesSourceConfig<>(ValuesSourceType.NUMERIC); - AutoDateHistogramAggregationBuilder.RoundingInfo[] roundings = new AutoDateHistogramAggregationBuilder.RoundingInfo[1]; - factory = new AutoDateHistogramAggregatorFactory("name", numericVS, - 1, roundings, - mock(SearchContext.class), null, new AggregatorFactories.Builder(), Collections.emptyMap()); - } - - return factory; - } } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java index f259a9e7c58..34883d61c6b 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java @@ -45,6 +45,7 @@ import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.engine.CommitStats; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.seqno.RetentionLeaseActions; +import org.elasticsearch.index.seqno.RetentionLeaseInvalidRetainingSeqNoException; import org.elasticsearch.index.seqno.RetentionLeaseNotFoundException; import org.elasticsearch.index.seqno.SeqNoStats; import org.elasticsearch.index.shard.ShardId; @@ -470,11 +471,13 @@ public class ShardFollowTasksExecutor extends PersistentTasksExecutor indices, MetaData metaData) { throw new UnsupportedOperationException("Unsupported for repository of type: " + TYPE); } - @Override public SnapshotInfo finalizeSnapshot(SnapshotId snapshotId, List indices, long startTime, String failure, int totalShards, List shardFailures, long repositoryStateId, boolean includeGlobalState, - Map userMetadata) { + MetaData metaData, Map userMetadata) { throw new UnsupportedOperationException("Unsupported for repository of type: " + TYPE); } @@ -296,7 +296,7 @@ public class CcrRepository extends AbstractLifecycleComponent implements Reposit @Override public void snapshotShard(Store store, MapperService mapperService, SnapshotId snapshotId, IndexId indexId, - IndexCommit snapshotIndexCommit, IndexShardSnapshotStatus snapshotStatus) { + IndexCommit snapshotIndexCommit, IndexShardSnapshotStatus snapshotStatus, ActionListener listener) { throw new UnsupportedOperationException("Unsupported for repository of type: " + TYPE); } @@ -336,10 +336,13 @@ public class CcrRepository extends AbstractLifecycleComponent implements Reposit ActionListener.wrap( r -> {}, e -> { - assert e instanceof ElasticsearchSecurityException == false : e; - logger.warn(new ParameterizedMessage( - "{} background renewal of retention lease [{}] failed during restore", shardId, - retentionLeaseId), e); + final Throwable cause = ExceptionsHelper.unwrapCause(e); + assert cause instanceof ElasticsearchSecurityException == false : cause; + if (cause instanceof RetentionLeaseInvalidRetainingSeqNoException == false) { + logger.warn(new ParameterizedMessage( + "{} background renewal of retention lease [{}] failed during restore", shardId, + retentionLeaseId), cause); + } })); } }, diff --git a/x-pack/plugin/core/licenses/httpclient-4.5.10.jar.sha1 b/x-pack/plugin/core/licenses/httpclient-4.5.10.jar.sha1 new file mode 100644 index 00000000000..b708efd0dd5 --- /dev/null +++ b/x-pack/plugin/core/licenses/httpclient-4.5.10.jar.sha1 @@ -0,0 
+1 @@ +7ca2e4276f4ef95e4db725a8cd4a1d1e7585b9e5 \ No newline at end of file diff --git a/x-pack/plugin/core/licenses/httpclient-4.5.8.jar.sha1 b/x-pack/plugin/core/licenses/httpclient-4.5.8.jar.sha1 deleted file mode 100644 index 73f0d30c709..00000000000 --- a/x-pack/plugin/core/licenses/httpclient-4.5.8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c27c9d6f15435dc2b6947112027b418b0eef32b9 \ No newline at end of file diff --git a/x-pack/plugin/core/licenses/httpcore-4.4.11.jar.sha1 b/x-pack/plugin/core/licenses/httpcore-4.4.11.jar.sha1 deleted file mode 100644 index 6d64372bfcc..00000000000 --- a/x-pack/plugin/core/licenses/httpcore-4.4.11.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -de748cf874e4e193b42eceea9fe5574fabb9d4df \ No newline at end of file diff --git a/x-pack/plugin/core/licenses/httpcore-4.4.12.jar.sha1 b/x-pack/plugin/core/licenses/httpcore-4.4.12.jar.sha1 new file mode 100644 index 00000000000..3c046171b30 --- /dev/null +++ b/x-pack/plugin/core/licenses/httpcore-4.4.12.jar.sha1 @@ -0,0 +1 @@ +21ebaf6d532bc350ba95bd81938fa5f0e511c132 \ No newline at end of file diff --git a/x-pack/plugin/core/licenses/httpcore-nio-4.4.11.jar.sha1 b/x-pack/plugin/core/licenses/httpcore-nio-4.4.11.jar.sha1 deleted file mode 100644 index 9e8777cb3da..00000000000 --- a/x-pack/plugin/core/licenses/httpcore-nio-4.4.11.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7d0a97d01d39cff9aa3e6db81f21fddb2435f4e6 \ No newline at end of file diff --git a/x-pack/plugin/core/licenses/httpcore-nio-4.4.12.jar.sha1 b/x-pack/plugin/core/licenses/httpcore-nio-4.4.12.jar.sha1 new file mode 100644 index 00000000000..4de932dc5ac --- /dev/null +++ b/x-pack/plugin/core/licenses/httpcore-nio-4.4.12.jar.sha1 @@ -0,0 +1 @@ +84cd29eca842f31db02987cfedea245af020198b \ No newline at end of file diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java index ddbf784520e..d493413dc1c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java @@ -70,6 +70,10 @@ public class XPackLicenseState { "Creating and Starting rollup jobs will no longer be allowed.", "Stopping/Deleting existing jobs, RollupCaps API and RollupSearch continue to function." }); + messages.put(XPackField.TRANSFORM, new String[] { + "Creating, starting, updating transforms will no longer be allowed.", + "Stopping/Deleting existing transforms continue to function." + }); messages.put(XPackField.ANALYTICS, new String[] { "Aggregations provided by Analytics plugin are no longer usable." 
}); @@ -594,11 +598,11 @@ public class XPackLicenseState { } /** - * Data Frame is always available as long as there is a valid license + * Transform is always available as long as there is a valid license * * @return true if the license is active */ - public synchronized boolean isDataFrameAllowed() { + public synchronized boolean isTransformAllowed() { return status.active; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/SourceOnlySnapshotRepository.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/SourceOnlySnapshotRepository.java index f6c3124c9be..280e4a43445 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/SourceOnlySnapshotRepository.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/SourceOnlySnapshotRepository.java @@ -15,6 +15,7 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.store.FSDirectory; import org.apache.lucene.store.FilterDirectory; import org.apache.lucene.store.SimpleFSDirectory; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.cluster.metadata.MetaData; @@ -24,6 +25,7 @@ import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.env.ShardLock; import org.elasticsearch.index.engine.EngineFactory; import org.elasticsearch.index.engine.ReadOnlyEngine; @@ -35,12 +37,15 @@ import org.elasticsearch.repositories.FilterRepository; import org.elasticsearch.repositories.IndexId; import org.elasticsearch.repositories.Repository; +import java.io.Closeable; import java.io.IOException; import java.io.UncheckedIOException; import java.nio.file.Path; +import java.util.ArrayList; import java.util.Collections; import java.util.Iterator; import java.util.List; +import java.util.Map; import java.util.function.Function; import java.util.function.Supplier; @@ -78,41 +83,63 @@ public final class SourceOnlySnapshotRepository extends FilterRepository { // a _source only snapshot with a plain repository it will be just fine since we already set the // required engine, that the index is read-only and the mapping to a default mapping try { - MetaData.Builder builder = MetaData.builder(metaData); - for (IndexId indexId : indices) { - IndexMetaData index = metaData.index(indexId.getName()); - IndexMetaData.Builder indexMetadataBuilder = IndexMetaData.builder(index); - // for a minimal restore we basically disable indexing on all fields and only create an index - // that is valid from an operational perspective. ie. it will have all metadata fields like version/ - // seqID etc. and an indexed ID field such that we can potentially perform updates on them or delete documents. 
- ImmutableOpenMap mappings = index.getMappings(); - Iterator> iterator = mappings.iterator(); - while (iterator.hasNext()) { - ObjectObjectCursor next = iterator.next(); - // we don't need to obey any routing here stuff is read-only anyway and get is disabled - final String mapping = "{ \"" + next.key + "\": { \"enabled\": false, \"_meta\": " + next.value.source().string() - + " } }"; - indexMetadataBuilder.putMapping(next.key, mapping); - } - indexMetadataBuilder.settings(Settings.builder().put(index.getSettings()) - .put(SOURCE_ONLY.getKey(), true) - .put("index.blocks.write", true)); // read-only! - indexMetadataBuilder.settingsVersion(1 + indexMetadataBuilder.settingsVersion()); - builder.put(indexMetadataBuilder); - } - super.initializeSnapshot(snapshotId, indices, builder.build()); + super.initializeSnapshot(snapshotId, indices, metadataToSnapshot(indices, metaData)); } catch (IOException ex) { throw new UncheckedIOException(ex); } } + @Override + public SnapshotInfo finalizeSnapshot(SnapshotId snapshotId, List indices, long startTime, String failure, int totalShards, + List shardFailures, long repositoryStateId, boolean includeGlobalState, MetaData metaData, + Map userMetadata) { + // we process the index metadata at snapshot time. This means if somebody tries to restore + // a _source only snapshot with a plain repository it will be just fine since we already set the + // required engine, that the index is read-only and the mapping to a default mapping + try { + return super.finalizeSnapshot(snapshotId, indices, startTime, failure, totalShards, shardFailures, repositoryStateId, + includeGlobalState, metadataToSnapshot(indices, metaData), userMetadata); + } catch (IOException ex) { + throw new UncheckedIOException(ex); + } + } + + private static MetaData metadataToSnapshot(List indices, MetaData metaData) throws IOException { + MetaData.Builder builder = MetaData.builder(metaData); + for (IndexId indexId : indices) { + IndexMetaData index = metaData.index(indexId.getName()); + IndexMetaData.Builder indexMetadataBuilder = IndexMetaData.builder(index); + // for a minimal restore we basically disable indexing on all fields and only create an index + // that is valid from an operational perspective. ie. it will have all metadata fields like version/ + // seqID etc. and an indexed ID field such that we can potentially perform updates on them or delete documents. + ImmutableOpenMap mappings = index.getMappings(); + Iterator> iterator = mappings.iterator(); + while (iterator.hasNext()) { + ObjectObjectCursor next = iterator.next(); + // we don't need to obey any routing here stuff is read-only anyway and get is disabled + final String mapping = "{ \"" + next.key + "\": { \"enabled\": false, \"_meta\": " + next.value.source().string() + + " } }"; + indexMetadataBuilder.putMapping(next.key, mapping); + } + indexMetadataBuilder.settings(Settings.builder().put(index.getSettings()) + .put(SOURCE_ONLY.getKey(), true) + .put("index.blocks.write", true)); // read-only! 
+ indexMetadataBuilder.settingsVersion(1 + indexMetadataBuilder.settingsVersion()); + builder.put(indexMetadataBuilder); + } + return builder.build(); + } + + @Override public void snapshotShard(Store store, MapperService mapperService, SnapshotId snapshotId, IndexId indexId, - IndexCommit snapshotIndexCommit, IndexShardSnapshotStatus snapshotStatus) { + IndexCommit snapshotIndexCommit, IndexShardSnapshotStatus snapshotStatus, ActionListener listener) { if (mapperService.documentMapper() != null // if there is no mapping this is null && mapperService.documentMapper().sourceMapper().isComplete() == false) { - throw new IllegalStateException("Can't snapshot _source only on an index that has incomplete source ie. has _source disabled " + - "or filters the source"); + listener.onFailure( + new IllegalStateException("Can't snapshot _source only on an index that has incomplete source ie. has _source disabled " + + "or filters the source")); + return; } Directory unwrap = FilterDirectory.unwrap(store.directory()); if (unwrap instanceof FSDirectory == false) { @@ -121,7 +148,10 @@ public final class SourceOnlySnapshotRepository extends FilterRepository { Path dataPath = ((FSDirectory) unwrap).getDirectory().getParent(); // TODO should we have a snapshot tmp directory per shard that is maintained by the system? Path snapPath = dataPath.resolve(SNAPSHOT_DIR_NAME); - try (FSDirectory directory = new SimpleFSDirectory(snapPath)) { + final List toClose = new ArrayList<>(3); + try { + FSDirectory directory = new SimpleFSDirectory(snapPath); + toClose.add(directory); Store tempStore = new Store(store.shardId(), store.indexSettings(), directory, new ShardLock(store.shardId()) { @Override protected void closeInternal() { @@ -137,16 +167,20 @@ public final class SourceOnlySnapshotRepository extends FilterRepository { final long maxDoc = segmentInfos.totalMaxDoc(); tempStore.bootstrapNewHistory(maxDoc, maxDoc); store.incRef(); - try (DirectoryReader reader = DirectoryReader.open(tempStore.directory(), - Collections.singletonMap(BlockTreeTermsReader.FST_MODE_KEY, BlockTreeTermsReader.FSTLoadMode.OFF_HEAP.name()))) { - IndexCommit indexCommit = reader.getIndexCommit(); - super.snapshotShard(tempStore, mapperService, snapshotId, indexId, indexCommit, snapshotStatus); - } finally { - store.decRef(); - } + toClose.add(store::decRef); + DirectoryReader reader = DirectoryReader.open(tempStore.directory(), + Collections.singletonMap(BlockTreeTermsReader.FST_MODE_KEY, BlockTreeTermsReader.FSTLoadMode.OFF_HEAP.name())); + toClose.add(reader); + IndexCommit indexCommit = reader.getIndexCommit(); + super.snapshotShard(tempStore, mapperService, snapshotId, indexId, indexCommit, snapshotStatus, + ActionListener.runBefore(listener, () -> IOUtils.close(toClose))); } catch (IOException e) { - // why on earth does this super method not declare IOException - throw new UncheckedIOException(e); + try { + IOUtils.close(toClose); + } catch (IOException ex) { + e.addSuppressed(ex); + } + listener.onFailure(e); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ClientHelper.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ClientHelper.java index ec4fc7a87ad..4de7c51f8da 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ClientHelper.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ClientHelper.java @@ -50,7 +50,7 @@ public final class ClientHelper { public static final String DEPRECATION_ORIGIN = "deprecation"; public static final String 
PERSISTENT_TASK_ORIGIN = "persistent_tasks"; public static final String ROLLUP_ORIGIN = "rollup"; - public static final String DATA_FRAME_ORIGIN = "data_frame"; + public static final String TRANSFORM_ORIGIN = "transform"; private ClientHelper() {} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java index fbe2bac8dfd..be89ff401f9 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java @@ -41,20 +41,6 @@ import org.elasticsearch.xpack.core.action.XPackUsageAction; import org.elasticsearch.xpack.core.beats.BeatsFeatureSetUsage; import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata; import org.elasticsearch.xpack.core.ccr.CCRFeatureSet; -import org.elasticsearch.xpack.core.dataframe.DataFrameFeatureSetUsage; -import org.elasticsearch.xpack.core.dataframe.DataFrameField; -import org.elasticsearch.xpack.core.dataframe.action.DeleteDataFrameTransformAction; -import org.elasticsearch.xpack.core.dataframe.action.GetDataFrameTransformsAction; -import org.elasticsearch.xpack.core.dataframe.action.GetDataFrameTransformsStatsAction; -import org.elasticsearch.xpack.core.dataframe.action.PreviewDataFrameTransformAction; -import org.elasticsearch.xpack.core.dataframe.action.PutDataFrameTransformAction; -import org.elasticsearch.xpack.core.dataframe.action.StartDataFrameTransformAction; -import org.elasticsearch.xpack.core.dataframe.action.StartDataFrameTransformTaskAction; -import org.elasticsearch.xpack.core.dataframe.action.StopDataFrameTransformAction; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransform; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformState; -import org.elasticsearch.xpack.core.dataframe.transforms.SyncConfig; -import org.elasticsearch.xpack.core.dataframe.transforms.TimeSyncConfig; import org.elasticsearch.xpack.core.analytics.AnalyticsFeatureSetUsage; import org.elasticsearch.xpack.core.deprecation.DeprecationInfoAction; import org.elasticsearch.xpack.core.enrich.action.DeleteEnrichPolicyAction; @@ -213,11 +199,25 @@ import org.elasticsearch.xpack.core.slm.SnapshotLifecycleMetadata; import org.elasticsearch.xpack.core.slm.action.DeleteSnapshotLifecycleAction; import org.elasticsearch.xpack.core.slm.action.ExecuteSnapshotLifecycleAction; import org.elasticsearch.xpack.core.slm.action.GetSnapshotLifecycleAction; +import org.elasticsearch.xpack.core.slm.action.GetSnapshotLifecycleStatsAction; import org.elasticsearch.xpack.core.slm.action.PutSnapshotLifecycleAction; import org.elasticsearch.xpack.core.spatial.SpatialFeatureSetUsage; import org.elasticsearch.xpack.core.sql.SqlFeatureSetUsage; import org.elasticsearch.xpack.core.ssl.SSLService; import org.elasticsearch.xpack.core.ssl.action.GetCertificateInfoAction; +import org.elasticsearch.xpack.core.transform.TransformFeatureSetUsage; +import org.elasticsearch.xpack.core.transform.TransformField; +import org.elasticsearch.xpack.core.transform.action.DeleteTransformAction; +import org.elasticsearch.xpack.core.transform.action.GetTransformsAction; +import org.elasticsearch.xpack.core.transform.action.GetTransformsStatsAction; +import org.elasticsearch.xpack.core.transform.action.PreviewTransformAction; +import org.elasticsearch.xpack.core.transform.action.PutTransformAction; +import 
org.elasticsearch.xpack.core.transform.action.StartTransformAction; +import org.elasticsearch.xpack.core.transform.action.StopTransformAction; +import org.elasticsearch.xpack.core.transform.transforms.TransformTaskParams; +import org.elasticsearch.xpack.core.transform.transforms.TransformState; +import org.elasticsearch.xpack.core.transform.transforms.SyncConfig; +import org.elasticsearch.xpack.core.transform.transforms.TimeSyncConfig; import org.elasticsearch.xpack.core.upgrade.actions.IndexUpgradeAction; import org.elasticsearch.xpack.core.upgrade.actions.IndexUpgradeInfoAction; import org.elasticsearch.xpack.core.vectors.VectorsFeatureSetUsage; @@ -421,17 +421,17 @@ public class XPackClientPlugin extends Plugin implements ActionPlugin, NetworkPl GetSnapshotLifecycleAction.INSTANCE, DeleteSnapshotLifecycleAction.INSTANCE, ExecuteSnapshotLifecycleAction.INSTANCE, + GetSnapshotLifecycleStatsAction.INSTANCE, // Freeze FreezeIndexAction.INSTANCE, // Data Frame - PutDataFrameTransformAction.INSTANCE, - StartDataFrameTransformAction.INSTANCE, - StartDataFrameTransformTaskAction.INSTANCE, - StopDataFrameTransformAction.INSTANCE, - DeleteDataFrameTransformAction.INSTANCE, - GetDataFrameTransformsAction.INSTANCE, - GetDataFrameTransformsStatsAction.INSTANCE, - PreviewDataFrameTransformAction.INSTANCE, + PutTransformAction.INSTANCE, + StartTransformAction.INSTANCE, + StopTransformAction.INSTANCE, + DeleteTransformAction.INSTANCE, + GetTransformsAction.INSTANCE, + GetTransformsStatsAction.INSTANCE, + PreviewTransformAction.INSTANCE, // enrich DeleteEnrichPolicyAction.INSTANCE, ExecuteEnrichPolicyAction.INSTANCE, @@ -541,11 +541,11 @@ public class XPackClientPlugin extends Plugin implements ActionPlugin, NetworkPl new NamedWriteableRegistry.Entry(LifecycleAction.class, SetPriorityAction.NAME, SetPriorityAction::new), new NamedWriteableRegistry.Entry(LifecycleAction.class, UnfollowAction.NAME, UnfollowAction::new), // Data Frame - new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.DATA_FRAME, DataFrameFeatureSetUsage::new), - new NamedWriteableRegistry.Entry(PersistentTaskParams.class, DataFrameField.TASK_NAME, DataFrameTransform::new), - new NamedWriteableRegistry.Entry(Task.Status.class, DataFrameField.TASK_NAME, DataFrameTransformState::new), - new NamedWriteableRegistry.Entry(PersistentTaskState.class, DataFrameField.TASK_NAME, DataFrameTransformState::new), - new NamedWriteableRegistry.Entry(SyncConfig.class, DataFrameField.TIME_BASED_SYNC.getPreferredName(), TimeSyncConfig::new), + new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.TRANSFORM, TransformFeatureSetUsage::new), + new NamedWriteableRegistry.Entry(PersistentTaskParams.class, TransformField.TASK_NAME, TransformTaskParams::new), + new NamedWriteableRegistry.Entry(Task.Status.class, TransformField.TASK_NAME, TransformState::new), + new NamedWriteableRegistry.Entry(PersistentTaskState.class, TransformField.TASK_NAME, TransformState::new), + new NamedWriteableRegistry.Entry(SyncConfig.class, TransformField.TIME_BASED_SYNC.getPreferredName(), TimeSyncConfig::new), new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.FLATTENED, FlattenedFeatureSetUsage::new), // Vectors new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.VECTORS, VectorsFeatureSetUsage::new), @@ -592,12 +592,12 @@ public class XPackClientPlugin extends Plugin implements ActionPlugin, NetworkPl new NamedXContentRegistry.Entry(PersistentTaskState.class, new ParseField(RollupJobStatus.NAME), 
RollupJobStatus::fromXContent), // Data Frame - new NamedXContentRegistry.Entry(PersistentTaskParams.class, new ParseField(DataFrameField.TASK_NAME), - DataFrameTransform::fromXContent), - new NamedXContentRegistry.Entry(Task.Status.class, new ParseField(DataFrameField.TASK_NAME), - DataFrameTransformState::fromXContent), - new NamedXContentRegistry.Entry(PersistentTaskState.class, new ParseField(DataFrameField.TASK_NAME), - DataFrameTransformState::fromXContent) + new NamedXContentRegistry.Entry(PersistentTaskParams.class, new ParseField(TransformField.TASK_NAME), + TransformTaskParams::fromXContent), + new NamedXContentRegistry.Entry(Task.Status.class, new ParseField(TransformField.TASK_NAME), + TransformState::fromXContent), + new NamedXContentRegistry.Entry(PersistentTaskState.class, new ParseField(TransformField.TASK_NAME), + TransformState::fromXContent) ); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackField.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackField.java index 0dc58b55ae9..ae4050bee7b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackField.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackField.java @@ -35,8 +35,8 @@ public final class XPackField { public static final String INDEX_LIFECYCLE = "ilm"; /** Name constant for the CCR feature. */ public static final String CCR = "ccr"; - /** Name constant for the data frame feature. */ - public static final String DATA_FRAME = "data_frame"; + /** Name constant for the transform feature. */ + public static final String TRANSFORM = "transform"; /** Name constant for flattened fields. */ public static final String FLATTENED = "flattened"; /** Name constant for the vectors feature. */ diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java index 5daab17fd69..814bd922403 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java @@ -50,8 +50,11 @@ public class XPackSettings { */ public static final Setting CCR_ENABLED_SETTING = Setting.boolSetting("xpack.ccr.enabled", true, Property.NodeScope); - /** Setting for enabling or disabling data frame. Defaults to true. */ - public static final Setting DATA_FRAME_ENABLED = Setting.boolSetting("xpack.data_frame.enabled", true, + /** Setting for enabling or disabling transform. Defaults to true. */ + @Deprecated // replaced by TRANSFORM_ENABLED + private static final Setting DATA_FRAME_ENABLED = Setting.boolSetting("xpack.data_frame.enabled", true, + Setting.Property.NodeScope, Setting.Property.Deprecated); + public static final Setting TRANSFORM_ENABLED = Setting.boolSetting("xpack.transform.enabled", DATA_FRAME_ENABLED, Setting.Property.NodeScope); /** Setting for enabling or disabling security. Defaults to true. */ @@ -97,6 +100,12 @@ public class XPackSettings { public static final Setting INDEX_LIFECYCLE_ENABLED = Setting.boolSetting("xpack.ilm.enabled", true, Setting.Property.NodeScope); + /** + * Setting for enabling or disabling the snapshot lifecycle extension. Defaults to true. + */ + public static final Setting SNAPSHOT_LIFECYCLE_ENABLED = Setting.boolSetting("xpack.slm.enabled", true, + Setting.Property.NodeScope); + /** Setting for enabling or disabling TLS. Defaults to false. 
*/ public static final Setting TRANSPORT_SSL_ENABLED = Setting.boolSetting("xpack.security.transport.ssl.enabled", false, Property.NodeScope); @@ -260,7 +269,9 @@ public class XPackSettings { settings.add(ROLLUP_ENABLED); settings.add(PASSWORD_HASHING_ALGORITHM); settings.add(INDEX_LIFECYCLE_ENABLED); + settings.add(SNAPSHOT_LIFECYCLE_ENABLED); settings.add(DATA_FRAME_ENABLED); + settings.add(TRANSFORM_ENABLED); settings.add(FLATTENED_ENABLED); settings.add(VECTORS_ENABLED); return Collections.unmodifiableList(settings); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/common/notifications/AbstractAuditMessage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/common/notifications/AbstractAuditMessage.java index f43c745a797..511fca2ce2c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/common/notifications/AbstractAuditMessage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/common/notifications/AbstractAuditMessage.java @@ -27,6 +27,7 @@ public abstract class AbstractAuditMessage implements ToXContentObject { public static final ParseField LEVEL = new ParseField("level"); public static final ParseField TIMESTAMP = new ParseField("timestamp"); public static final ParseField NODE_NAME = new ParseField("node_name"); + public static final ParseField JOB_TYPE = new ParseField("job_type"); protected static final ConstructingObjectParser createParser( String name, AbstractAuditMessageFactory messageFactory, ParseField resourceField) { @@ -99,13 +100,17 @@ public abstract class AbstractAuditMessage implements ToXContentObject { if (nodeName != null) { builder.field(NODE_NAME.getPreferredName(), nodeName); } + String jobType = getJobType(); + if (jobType != null) { + builder.field(JOB_TYPE.getPreferredName(), jobType); + } builder.endObject(); return builder; } @Override public int hashCode() { - return Objects.hash(resourceId, message, level, timestamp, nodeName); + return Objects.hash(resourceId, message, level, timestamp, nodeName, getJobType()); } @Override @@ -122,8 +127,17 @@ public abstract class AbstractAuditMessage implements ToXContentObject { Objects.equals(message, other.message) && Objects.equals(level, other.level) && Objects.equals(timestamp, other.timestamp) && - Objects.equals(nodeName, other.nodeName); + Objects.equals(nodeName, other.nodeName) && + Objects.equals(getJobType(), other.getJobType()); } + /** + * @return job type string used to tell apart jobs of different types stored in the same index + */ + public abstract String getJobType(); + + /** + * @return resource id field name used when storing a new message + */ protected abstract String getResourceField(); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/DataFrameMessages.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/DataFrameMessages.java deleted file mode 100644 index 32f639a1fac..00000000000 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/DataFrameMessages.java +++ /dev/null @@ -1,107 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -package org.elasticsearch.xpack.core.dataframe; - -import java.text.MessageFormat; -import java.util.Locale; - -public class DataFrameMessages { - - public static final String REST_STOP_TRANSFORM_WAIT_FOR_COMPLETION_TIMEOUT = - "Timed out after [{0}] while waiting for data frame transform [{1}] to stop"; - public static final String REST_STOP_TRANSFORM_WAIT_FOR_COMPLETION_INTERRUPT = - "Interrupted while waiting for data frame transform [{0}] to stop"; - public static final String REST_PUT_DATA_FRAME_TRANSFORM_EXISTS = "Transform with id [{0}] already exists"; - public static final String REST_DATA_FRAME_UNKNOWN_TRANSFORM = "Transform with id [{0}] could not be found"; - public static final String REST_PUT_DATA_FRAME_FAILED_TO_VALIDATE_DATA_FRAME_CONFIGURATION = - "Failed to validate data frame configuration"; - public static final String REST_PUT_DATA_FRAME_FAILED_PERSIST_TRANSFORM_CONFIGURATION = "Failed to persist data frame configuration"; - public static final String REST_PUT_DATA_FRAME_FAILED_TO_DEDUCE_DEST_MAPPINGS = "Failed to deduce dest mappings"; - public static final String REST_PUT_DATA_FRAME_SOURCE_INDEX_MISSING = "Source index [{0}] does not exist"; - public static final String REST_PUT_DATA_FRAME_DEST_IN_SOURCE = "Destination index [{0}] is included in source expression [{1}]"; - public static final String REST_PUT_DATA_FRAME_DEST_SINGLE_INDEX = "Destination index [{0}] should refer to a single index"; - public static final String REST_PUT_DATA_FRAME_INCONSISTENT_ID = - "Inconsistent id; ''{0}'' specified in the body differs from ''{1}'' specified as a URL argument"; - public static final String DATA_FRAME_CONFIG_INVALID = "Data frame transform configuration is invalid [{0}]"; - public static final String REST_DATA_FRAME_FAILED_TO_SERIALIZE_TRANSFORM = "Failed to serialise transform [{0}]"; - public static final String DATA_FRAME_FAILED_TO_PERSIST_STATS = "Failed to persist data frame statistics for transform [{0}]"; - public static final String DATA_FRAME_UNKNOWN_TRANSFORM_STATS = "Statistics for transform [{0}] could not be found"; - - public static final String DATA_FRAME_CANNOT_STOP_FAILED_TRANSFORM = - "Unable to stop data frame transform [{0}] as it is in a failed state with reason [{1}]." + - " Use force stop to stop the data frame transform."; - public static final String DATA_FRAME_CANNOT_START_FAILED_TRANSFORM = - "Unable to start data frame transform [{0}] as it is in a failed state with failure: [{1}]. 
" + - "Use force start to restart data frame transform once error is resolved."; - - public static final String FAILED_TO_CREATE_DESTINATION_INDEX = "Could not create destination index [{0}] for transform [{1}]"; - public static final String FAILED_TO_RELOAD_TRANSFORM_CONFIGURATION = - "Failed to reload data frame transform configuration for transform [{0}]"; - public static final String FAILED_TO_LOAD_TRANSFORM_CONFIGURATION = - "Failed to load data frame transform configuration for transform [{0}]"; - public static final String FAILED_TO_PARSE_TRANSFORM_CONFIGURATION = - "Failed to parse transform configuration for data frame transform [{0}]"; - public static final String FAILED_TO_PARSE_TRANSFORM_STATISTICS_CONFIGURATION = - "Failed to parse transform statistics for data frame transform [{0}]"; - public static final String FAILED_TO_LOAD_TRANSFORM_CHECKPOINT = - "Failed to load data frame transform checkpoint for transform [{0}]"; - public static final String FAILED_TO_LOAD_TRANSFORM_STATE = - "Failed to load data frame transform state for transform [{0}]"; - public static final String DATA_FRAME_TRANSFORM_CONFIGURATION_NO_TRANSFORM = - "Data frame transform configuration must specify exactly 1 function"; - public static final String DATA_FRAME_TRANSFORM_CONFIGURATION_PIVOT_NO_GROUP_BY = - "Data frame pivot transform configuration must specify at least 1 group_by"; - public static final String DATA_FRAME_TRANSFORM_CONFIGURATION_PIVOT_NO_AGGREGATION = - "Data frame pivot transform configuration must specify at least 1 aggregation"; - public static final String DATA_FRAME_TRANSFORM_PIVOT_FAILED_TO_CREATE_COMPOSITE_AGGREGATION = - "Failed to create composite aggregation from pivot function"; - public static final String DATA_FRAME_TRANSFORM_CONFIGURATION_INVALID = - "Data frame transform configuration [{0}] has invalid elements"; - public static final String DATA_FRAME_UNABLE_TO_GATHER_FIELD_MAPPINGS = "Failed to gather field mappings for index [{0}]"; - public static final String DATA_FRAME_UPDATE_CANNOT_CHANGE_SYNC_METHOD = - "Cannot change the current sync configuration of transform [{0}] from [{1}] to [{2}]"; - public static final String LOG_DATA_FRAME_TRANSFORM_CONFIGURATION_BAD_QUERY = - "Failed to parse query for data frame transform"; - public static final String LOG_DATA_FRAME_TRANSFORM_CONFIGURATION_BAD_GROUP_BY = - "Failed to parse group_by for data frame pivot transform"; - public static final String LOG_DATA_FRAME_TRANSFORM_CONFIGURATION_BAD_AGGREGATION = - "Failed to parse aggregation for data frame pivot transform"; - public static final String LOG_DATA_FRAME_TRANSFORM_PIVOT_REDUCE_PAGE_SIZE = - "Insufficient memory for search, reducing number of buckets per search from [{0}] to [{1}]"; - public static final String LOG_DATA_FRAME_TRANSFORM_PIVOT_LOW_PAGE_SIZE_FAILURE = - "Insufficient memory for search after repeated page size reductions to [{0}], unable to continue pivot, " - + "please simplify job or increase heap size on data nodes."; - - public static final String FAILED_TO_PARSE_TRANSFORM_CHECKPOINTS = - "Failed to parse transform checkpoints for [{0}]"; - - - public static final String ID_TOO_LONG = "The id cannot contain more than {0} characters."; - public static final String INVALID_ID = "Invalid {0}; ''{1}'' can contain lowercase alphanumeric (a-z and 0-9), hyphens or " + - "underscores; must start and end with alphanumeric"; - private DataFrameMessages() { - } - - /** - * Returns the message parameter - * - * @param message Should be one of the statics defined in 
this class - */ - public static String getMessage(String message) { - return message; - } - - /** - * Format the message with the supplied arguments - * - * @param message Should be one of the statics defined in this class - * @param args MessageFormat arguments. See {@linkplain MessageFormat#format(Object)}] - */ - public static String getMessage(String message, Object... args) { - return new MessageFormat(message, Locale.ROOT).format(args); - } -} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformTaskAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformTaskAction.java deleted file mode 100644 index 4fe87d9727f..00000000000 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformTaskAction.java +++ /dev/null @@ -1,152 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -package org.elasticsearch.xpack.core.dataframe.action; - -import org.elasticsearch.Version; -import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.ActionType; -import org.elasticsearch.action.support.tasks.BaseTasksRequest; -import org.elasticsearch.action.support.tasks.BaseTasksResponse; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.ToXContentObject; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.tasks.Task; -import org.elasticsearch.xpack.core.dataframe.DataFrameField; -import org.elasticsearch.xpack.core.dataframe.utils.ExceptionsHelper; - -import java.io.IOException; -import java.util.Collections; -import java.util.Objects; - -public class StartDataFrameTransformTaskAction extends ActionType { - - public static final StartDataFrameTransformTaskAction INSTANCE = new StartDataFrameTransformTaskAction(); - public static final String NAME = "cluster:admin/data_frame/start_task"; - - private StartDataFrameTransformTaskAction() { - super(NAME, StartDataFrameTransformTaskAction.Response::new); - } - - public static class Request extends BaseTasksRequest { - - private final String id; - private final boolean force; - - public Request(String id, boolean force) { - this.id = ExceptionsHelper.requireNonNull(id, DataFrameField.ID.getPreferredName()); - this.force = force; - } - - public Request(StreamInput in) throws IOException { - super(in); - id = in.readString(); - if (in.getVersion().onOrAfter(Version.V_7_4_0)) { - force = in.readBoolean(); - } else { - // The behavior before V_7_4_0 was that this flag did not exist, - // assuming previous checks allowed this task to be started. 
- force = true; - } - } - - public String getId() { - return id; - } - - public boolean isForce() { - return force; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeString(id); - if (out.getVersion().onOrAfter(Version.V_7_4_0)) { - out.writeBoolean(force); - } - } - - @Override - public boolean match(Task task) { - return task.getDescription().equals(DataFrameField.PERSISTENT_TASK_DESCRIPTION_PREFIX + id); - } - - @Override - public ActionRequestValidationException validate() { - return null; - } - - @Override - public int hashCode() { - return Objects.hash(id, force); - } - - @Override - public boolean equals(Object obj) { - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - Request other = (Request) obj; - return Objects.equals(id, other.id) && force == other.force; - } - } - - public static class Response extends BaseTasksResponse implements ToXContentObject { - private final boolean started; - - public Response(StreamInput in) throws IOException { - super(in); - started = in.readBoolean(); - } - - public Response(boolean started) { - super(Collections.emptyList(), Collections.emptyList()); - this.started = started; - } - - public boolean isStarted() { - return started; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeBoolean(started); - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - toXContentCommon(builder, params); - builder.field("started", started); - builder.endObject(); - return builder; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - - if (obj == null || getClass() != obj.getClass()) { - return false; - } - Response response = (Response) obj; - return started == response.started; - } - - @Override - public int hashCode() { - return Objects.hash(started); - } - } -} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/InitializePolicyContextStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/InitializePolicyContextStep.java index 7905c460a5e..c0ba7ba5411 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/InitializePolicyContextStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/InitializePolicyContextStep.java @@ -22,7 +22,7 @@ public final class InitializePolicyContextStep extends ClusterStateActionStep { public static final StepKey KEY = new StepKey(INITIALIZATION_PHASE, "init", "init"); private static final Logger logger = LogManager.getLogger(InitializePolicyContextStep.class); - public InitializePolicyContextStep(Step.StepKey key, StepKey nextStepKey) { + InitializePolicyContextStep(Step.StepKey key, StepKey nextStepKey) { super(key, nextStepKey); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/LifecycleSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/LifecycleSettings.java index 21299678e9a..bec63ed654f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/LifecycleSettings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/LifecycleSettings.java @@ -5,8 +5,10 @@ */ package org.elasticsearch.xpack.core.ilm; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Setting; import 
org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.xpack.core.scheduler.CronSchedule; /** * Class encapsulating settings related to Index Lifecycle Management X-Pack Plugin @@ -15,7 +17,12 @@ public class LifecycleSettings { public static final String LIFECYCLE_POLL_INTERVAL = "indices.lifecycle.poll_interval"; public static final String LIFECYCLE_NAME = "index.lifecycle.name"; public static final String LIFECYCLE_INDEXING_COMPLETE = "index.lifecycle.indexing_complete"; + public static final String LIFECYCLE_ORIGINATION_DATE = "index.lifecycle.origination_date"; + public static final String SLM_HISTORY_INDEX_ENABLED = "slm.history_index_enabled"; + public static final String SLM_RETENTION_SCHEDULE = "slm.retention_schedule"; + public static final String SLM_RETENTION_DURATION = "slm.retention_duration"; + public static final Setting LIFECYCLE_POLL_INTERVAL_SETTING = Setting.positiveTimeSetting(LIFECYCLE_POLL_INTERVAL, TimeValue.timeValueMinutes(10), Setting.Property.Dynamic, Setting.Property.NodeScope); @@ -23,7 +30,22 @@ public class LifecycleSettings { Setting.Property.Dynamic, Setting.Property.IndexScope); public static final Setting LIFECYCLE_INDEXING_COMPLETE_SETTING = Setting.boolSetting(LIFECYCLE_INDEXING_COMPLETE, false, Setting.Property.Dynamic, Setting.Property.IndexScope); + public static final Setting LIFECYCLE_ORIGINATION_DATE_SETTING = + Setting.longSetting(LIFECYCLE_ORIGINATION_DATE, -1, -1, Setting.Property.Dynamic, Setting.Property.IndexScope); public static final Setting SLM_HISTORY_INDEX_ENABLED_SETTING = Setting.boolSetting(SLM_HISTORY_INDEX_ENABLED, true, Setting.Property.NodeScope); + public static final Setting SLM_RETENTION_SCHEDULE_SETTING = Setting.simpleString(SLM_RETENTION_SCHEDULE, str -> { + try { + if (Strings.hasText(str)) { + // Test that the setting is a valid cron syntax + new CronSchedule(str); + } + } catch (Exception e) { + throw new IllegalArgumentException("invalid cron expression [" + str + "] for SLM retention schedule [" + + SLM_RETENTION_SCHEDULE + "]", e); + } + }, Setting.Property.Dynamic, Setting.Property.NodeScope); + public static final Setting SLM_RETENTION_DURATION_SETTING = Setting.timeSetting(SLM_RETENTION_DURATION, + TimeValue.timeValueHours(1), TimeValue.timeValueMillis(500), Setting.Property.Dynamic, Setting.Property.NodeScope); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/SegmentCountStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/SegmentCountStep.java index ab877b36b88..ab543839691 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/SegmentCountStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/SegmentCountStep.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.segments.IndexSegments; import org.elasticsearch.action.admin.indices.segments.IndicesSegmentsRequest; import org.elasticsearch.action.admin.indices.segments.ShardSegments; +import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.ShardRouting; @@ -50,19 +51,32 @@ public class SegmentCountStep extends AsyncWaitStep { public void evaluateCondition(IndexMetaData indexMetaData, Listener listener) { getClient().admin().indices().segments(new IndicesSegmentsRequest(indexMetaData.getIndex().getName()), 
ActionListener.wrap(response -> { - IndexSegments segments = response.getIndices().get(indexMetaData.getIndex().getName()); - List unmergedShards = segments.getShards().values().stream() - .flatMap(iss -> Arrays.stream(iss.getShards())) - .filter(shardSegments -> shardSegments.getSegments().size() > maxNumSegments) - .collect(Collectors.toList()); - if (unmergedShards.size() > 0) { - Map unmergedShardCounts = unmergedShards.stream() - .collect(Collectors.toMap(ShardSegments::getShardRouting, ss -> ss.getSegments().size())); - logger.info("[{}] best effort force merge to [{}] segments did not succeed for {} shards: {}", - indexMetaData.getIndex().getName(), maxNumSegments, unmergedShards.size(), unmergedShardCounts); + IndexSegments idxSegments = response.getIndices().get(indexMetaData.getIndex().getName()); + if (idxSegments == null || (response.getShardFailures() != null && response.getShardFailures().length > 0)) { + final DefaultShardOperationFailedException[] failures = response.getShardFailures(); + logger.info("[{}] retrieval of segment counts after force merge did not succeed, " + + "there were {} shard failures. " + + "failures: {}", + indexMetaData.getIndex().getName(), + response.getFailedShards(), + failures == null ? "n/a" : Strings.collectionToDelimitedString(Arrays.stream(failures) + .map(Strings::toString) + .collect(Collectors.toList()), ",")); + listener.onResponse(true, new Info(-1)); + } else { + List unmergedShards = idxSegments.getShards().values().stream() + .flatMap(iss -> Arrays.stream(iss.getShards())) + .filter(shardSegments -> shardSegments.getSegments().size() > maxNumSegments) + .collect(Collectors.toList()); + if (unmergedShards.size() > 0) { + Map unmergedShardCounts = unmergedShards.stream() + .collect(Collectors.toMap(ShardSegments::getShardRouting, ss -> ss.getSegments().size())); + logger.info("[{}] best effort force merge to [{}] segments did not succeed for {} shards: {}", + indexMetaData.getIndex().getName(), maxNumSegments, unmergedShards.size(), unmergedShardCounts); + } + // Force merging is best effort, so always return true that the condition has been met. + listener.onResponse(true, new Info(unmergedShards.size())); } - // Force merging is best effort, so always return true that the condition has been met. 
- listener.onResponse(true, new Info(unmergedShards.size())); }, listener::onFailure)); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/DataFrameAnalysis.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/DataFrameAnalysis.java index bc0e623cdeb..c1db48f0d97 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/DataFrameAnalysis.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/DataFrameAnalysis.java @@ -32,4 +32,9 @@ public interface DataFrameAnalysis extends ToXContentObject, NamedWriteable { * @return {@code true} if this analysis supports data frame rows with missing values */ boolean supportsMissingValues(); + + /** + * @return {@code true} if this analysis persists state that can later be used to restore from a given point + */ + boolean persistsState(); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/OutlierDetection.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/OutlierDetection.java index 35e3d234a7c..3ef5fa331bd 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/OutlierDetection.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/OutlierDetection.java @@ -169,6 +169,11 @@ public class OutlierDetection implements DataFrameAnalysis { return false; } + @Override + public boolean persistsState() { + return false; + } + public enum Method { LOF, LDOF, DISTANCE_KTH_NN, DISTANCE_KNN; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/Regression.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/Regression.java index 04a5801ffa2..fd7820217df 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/Regression.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/Regression.java @@ -210,6 +210,11 @@ public class Regression implements DataFrameAnalysis { return true; } + @Override + public boolean persistsState() { + return true; + } + @Override public int hashCode() { return Objects.hash(dependentVariable, lambda, gamma, eta, maximumNumberTrees, featureBagFraction, predictionFieldName, diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java index 9014724bf5a..4d33260053a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java @@ -56,6 +56,17 @@ public final class Messages { public static final String DATA_FRAME_ANALYTICS_BAD_QUERY_FORMAT = "Data Frame Analytics config query is not parsable"; public static final String DATA_FRAME_ANALYTICS_BAD_FIELD_FILTER = "No field [{0}] could be detected"; + public static final String DATA_FRAME_ANALYTICS_AUDIT_CREATED = "Created analytics with analysis type [{0}]"; + public static final String DATA_FRAME_ANALYTICS_AUDIT_STARTED = "Started analytics"; + public static final String DATA_FRAME_ANALYTICS_AUDIT_STOPPED = "Stopped analytics"; + public static final String DATA_FRAME_ANALYTICS_AUDIT_DELETED = "Deleted analytics"; + public static final 
String DATA_FRAME_ANALYTICS_AUDIT_UPDATED_STATE = "Successfully updated analytics task state to [{0}]"; + public static final String DATA_FRAME_ANALYTICS_AUDIT_ESTIMATED_MEMORY_USAGE = "Estimated memory usage for this analytics to be [{0}]"; + public static final String DATA_FRAME_ANALYTICS_AUDIT_CREATING_DEST_INDEX = "Creating destination index [{0}]"; + public static final String DATA_FRAME_ANALYTICS_AUDIT_REUSING_DEST_INDEX = "Using existing destination index [{0}]"; + public static final String DATA_FRAME_ANALYTICS_AUDIT_FINISHED_REINDEXING = "Finished reindexing to destination index [{0}]"; + public static final String DATA_FRAME_ANALYTICS_AUDIT_FINISHED_ANALYSIS = "Finished analysis"; + public static final String FILTER_CANNOT_DELETE = "Cannot delete filter [{0}] currently used by jobs {1}"; public static final String FILTER_CONTAINS_TOO_MANY_ITEMS = "Filter [{0}] contains too many items; up to [{1}] items are allowed"; public static final String FILTER_NOT_FOUND = "No filter with id [{0}] exists"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappings.java index 267d85e7205..804e9c8dcda 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappings.java @@ -1121,12 +1121,13 @@ public class ElasticsearchMappings { XContentBuilder builder = jsonBuilder().startObject(); builder.startObject(SINGLE_MAPPING_NAME); addMetaInformation(builder); + builder.field(DYNAMIC, "false"); builder.startObject(PROPERTIES) .startObject(Job.ID.getPreferredName()) .field(TYPE, KEYWORD) .endObject() .startObject(AnomalyDetectionAuditMessage.LEVEL.getPreferredName()) - .field(TYPE, KEYWORD) + .field(TYPE, KEYWORD) .endObject() .startObject(AnomalyDetectionAuditMessage.MESSAGE.getPreferredName()) .field(TYPE, TEXT) @@ -1142,6 +1143,9 @@ public class ElasticsearchMappings { .startObject(AnomalyDetectionAuditMessage.NODE_NAME.getPreferredName()) .field(TYPE, KEYWORD) .endObject() + .startObject(AnomalyDetectionAuditMessage.JOB_TYPE.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() .endObject() .endObject() .endObject(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/notifications/AnomalyDetectionAuditMessage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/notifications/AnomalyDetectionAuditMessage.java index 36c3828f323..f1c9570254b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/notifications/AnomalyDetectionAuditMessage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/notifications/AnomalyDetectionAuditMessage.java @@ -23,6 +23,11 @@ public class AnomalyDetectionAuditMessage extends AbstractAuditMessage { super(resourceId, message, level, timestamp, nodeName); } + @Override + public final String getJobType() { + return Job.ANOMALY_DETECTOR_JOB_TYPE; + } + @Override protected String getResourceField() { return JOB_ID.getPreferredName(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/notifications/AuditorField.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/notifications/AuditorField.java index ac83b7c37f5..307ff01fa45 100644 --- 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/notifications/AuditorField.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/notifications/AuditorField.java @@ -6,8 +6,8 @@ package org.elasticsearch.xpack.core.ml.notifications; public final class AuditorField { - public static final String NOTIFICATIONS_INDEX = ".ml-notifications"; + + public static final String NOTIFICATIONS_INDEX = ".ml-notifications-000001"; private AuditorField() {} - } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/notifications/DataFrameAnalyticsAuditMessage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/notifications/DataFrameAnalyticsAuditMessage.java new file mode 100644 index 00000000000..0d19aac65f2 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/notifications/DataFrameAnalyticsAuditMessage.java @@ -0,0 +1,35 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.notifications; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.xpack.core.common.notifications.AbstractAuditMessage; +import org.elasticsearch.xpack.core.common.notifications.Level; +import org.elasticsearch.xpack.core.ml.job.config.Job; + +import java.util.Date; + +public class DataFrameAnalyticsAuditMessage extends AbstractAuditMessage { + + private static final ParseField JOB_ID = Job.ID; + public static final ConstructingObjectParser PARSER = + createParser("ml_analytics_audit_message", DataFrameAnalyticsAuditMessage::new, JOB_ID); + + public DataFrameAnalyticsAuditMessage(String resourceId, String message, Level level, Date timestamp, String nodeName) { + super(resourceId, message, level, timestamp, nodeName); + } + + @Override + public final String getJobType() { + return "data_frame_analytics"; + } + + @Override + protected String getResourceField() { + return JOB_ID.getPreferredName(); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java index 9da60d985c6..a5c715ea41e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java @@ -123,6 +123,9 @@ public class ReservedRolesStore implements BiConsumer, ActionListene // .code_internal-* is for Code's internal worker queue index creation. 
RoleDescriptor.IndicesPrivileges.builder() .indices(".code-*", ".code_internal-*").privileges("all").build(), + // .apm-* is for APM's agent configuration index creation + RoleDescriptor.IndicesPrivileges.builder() + .indices(".apm-agent-configuration").privileges("all").build(), }, null, new ConfigurableClusterPrivilege[] { new ManageApplicationPrivileges(Collections.singleton("kibana-*")) }, diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecycleMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecycleMetadata.java index 7c64a0c3b9b..71e33f9c8d7 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecycleMetadata.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecycleMetadata.java @@ -20,6 +20,7 @@ import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.xpack.core.XPackPlugin.XPackMetaDataCustom; import org.elasticsearch.xpack.core.ilm.OperationMode; +import org.elasticsearch.xpack.slm.SnapshotLifecycleStats; import java.io.IOException; import java.util.Collections; @@ -39,36 +40,51 @@ import java.util.stream.Collectors; public class SnapshotLifecycleMetadata implements XPackMetaDataCustom { public static final String TYPE = "snapshot_lifecycle"; - public static final ParseField OPERATION_MODE_FIELD = new ParseField("operation_mode"); - public static final ParseField POLICIES_FIELD = new ParseField("policies"); - public static final SnapshotLifecycleMetadata EMPTY = new SnapshotLifecycleMetadata(Collections.emptyMap(), OperationMode.RUNNING); + private static final ParseField OPERATION_MODE_FIELD = new ParseField("operation_mode"); + private static final ParseField POLICIES_FIELD = new ParseField("policies"); + private static final ParseField STATS_FIELD = new ParseField("stats"); + + public static final SnapshotLifecycleMetadata EMPTY = + new SnapshotLifecycleMetadata(Collections.emptyMap(), OperationMode.RUNNING, new SnapshotLifecycleStats()); @SuppressWarnings("unchecked") public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(TYPE, a -> new SnapshotLifecycleMetadata( ((List) a[0]).stream() .collect(Collectors.toMap(m -> m.getPolicy().getId(), Function.identity())), - OperationMode.valueOf((String) a[1]))); + OperationMode.valueOf((String) a[1]), + (SnapshotLifecycleStats) a[2])); static { PARSER.declareNamedObjects(ConstructingObjectParser.constructorArg(), (p, c, n) -> SnapshotLifecyclePolicyMetadata.parse(p, n), v -> { throw new IllegalArgumentException("ordered " + POLICIES_FIELD.getPreferredName() + " are not supported"); }, POLICIES_FIELD); + PARSER.declareString(ConstructingObjectParser.constructorArg(), OPERATION_MODE_FIELD); + PARSER.declareObject(ConstructingObjectParser.constructorArg(), (v, o) -> SnapshotLifecycleStats.parse(v), STATS_FIELD); } private final Map snapshotConfigurations; private final OperationMode operationMode; + private final SnapshotLifecycleStats slmStats; - public SnapshotLifecycleMetadata(Map snapshotConfigurations, OperationMode operationMode) { + public SnapshotLifecycleMetadata(Map snapshotConfigurations, + OperationMode operationMode, + SnapshotLifecycleStats slmStats) { this.snapshotConfigurations = new HashMap<>(snapshotConfigurations); this.operationMode = operationMode; + this.slmStats = slmStats; } public SnapshotLifecycleMetadata(StreamInput in) throws IOException 
{ this.snapshotConfigurations = in.readMap(StreamInput::readString, SnapshotLifecyclePolicyMetadata::new); this.operationMode = in.readEnum(OperationMode.class); + if (in.getVersion().onOrAfter(Version.V_7_5_0)) { + this.slmStats = new SnapshotLifecycleStats(in); + } else { + this.slmStats = new SnapshotLifecycleStats(); + } } public Map getSnapshotConfigurations() { @@ -79,6 +95,10 @@ public class SnapshotLifecycleMetadata implements XPackMetaDataCustom { return operationMode; } + public SnapshotLifecycleStats getStats() { + return this.slmStats; + } + @Override public EnumSet context() { return MetaData.ALL_CONTEXTS; @@ -103,12 +123,16 @@ public class SnapshotLifecycleMetadata implements XPackMetaDataCustom { public void writeTo(StreamOutput out) throws IOException { out.writeMap(this.snapshotConfigurations, StreamOutput::writeString, (out1, value) -> value.writeTo(out1)); out.writeEnum(this.operationMode); + if (out.getVersion().onOrAfter(Version.V_7_5_0)) { + this.slmStats.writeTo(out); + } } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.field(POLICIES_FIELD.getPreferredName(), this.snapshotConfigurations); builder.field(OPERATION_MODE_FIELD.getPreferredName(), operationMode); + builder.field(STATS_FIELD.getPreferredName(), this.slmStats); return builder; } @@ -119,7 +143,7 @@ public class SnapshotLifecycleMetadata implements XPackMetaDataCustom { @Override public int hashCode() { - return Objects.hash(this.snapshotConfigurations, this.operationMode); + return Objects.hash(this.snapshotConfigurations, this.operationMode, this.slmStats); } @Override @@ -132,18 +156,21 @@ public class SnapshotLifecycleMetadata implements XPackMetaDataCustom { } SnapshotLifecycleMetadata other = (SnapshotLifecycleMetadata) obj; return this.snapshotConfigurations.equals(other.snapshotConfigurations) && - this.operationMode.equals(other.operationMode); + this.operationMode.equals(other.operationMode) && + this.slmStats.equals(other.slmStats); } public static class SnapshotLifecycleMetadataDiff implements NamedDiff { final Diff> lifecycles; final OperationMode operationMode; + final SnapshotLifecycleStats slmStats; SnapshotLifecycleMetadataDiff(SnapshotLifecycleMetadata before, SnapshotLifecycleMetadata after) { this.lifecycles = DiffableUtils.diff(before.snapshotConfigurations, after.snapshotConfigurations, DiffableUtils.getStringKeySerializer()); this.operationMode = after.operationMode; + this.slmStats = after.slmStats; } public SnapshotLifecycleMetadataDiff(StreamInput in) throws IOException { @@ -151,13 +178,18 @@ public class SnapshotLifecycleMetadata implements XPackMetaDataCustom { SnapshotLifecyclePolicyMetadata::new, SnapshotLifecycleMetadataDiff::readLifecyclePolicyDiffFrom); this.operationMode = in.readEnum(OperationMode.class); + if (in.getVersion().onOrAfter(Version.V_7_5_0)) { + this.slmStats = new SnapshotLifecycleStats(in); + } else { + this.slmStats = new SnapshotLifecycleStats(); + } } @Override public MetaData.Custom apply(MetaData.Custom part) { TreeMap newLifecycles = new TreeMap<>( lifecycles.apply(((SnapshotLifecycleMetadata) part).snapshotConfigurations)); - return new SnapshotLifecycleMetadata(newLifecycles, this.operationMode); + return new SnapshotLifecycleMetadata(newLifecycles, this.operationMode, this.slmStats); } @Override @@ -169,6 +201,9 @@ public class SnapshotLifecycleMetadata implements XPackMetaDataCustom { public void writeTo(StreamOutput out) throws IOException { lifecycles.writeTo(out); 
out.writeEnum(this.operationMode); + if (out.getVersion().onOrAfter(Version.V_7_5_0)) { + this.slmStats.writeTo(out); + } } static Diff readLifecyclePolicyDiffFrom(StreamInput in) throws IOException { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicy.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicy.java index ddb05ad1df1..e038d3bb6e3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicy.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicy.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.core.slm; import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotRequest; import org.elasticsearch.action.support.IndicesOptions; @@ -54,11 +55,13 @@ public class SnapshotLifecyclePolicy extends AbstractDiffable configuration; + private final SnapshotRetentionConfiguration retentionPolicy; private static final ParseField NAME = new ParseField("name"); private static final ParseField SCHEDULE = new ParseField("schedule"); private static final ParseField REPOSITORY = new ParseField("repository"); private static final ParseField CONFIG = new ParseField("config"); + private static final ParseField RETENTION = new ParseField("retention"); private static final IndexNameExpressionResolver.DateMathExpressionResolver DATE_MATH_RESOLVER = new IndexNameExpressionResolver.DateMathExpressionResolver(); private static final String METADATA_FIELD_NAME = "metadata"; @@ -71,7 +74,8 @@ public class SnapshotLifecyclePolicy extends AbstractDiffable config = (Map) a[3]; - return new SnapshotLifecyclePolicy(id, name, schedule, repo, config); + SnapshotRetentionConfiguration retention = (SnapshotRetentionConfiguration) a[4]; + return new SnapshotLifecyclePolicy(id, name, schedule, repo, config, retention); }); static { @@ -79,15 +83,18 @@ public class SnapshotLifecyclePolicy extends AbstractDiffable p.map(), CONFIG); + PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), SnapshotRetentionConfiguration::parse, RETENTION); } public SnapshotLifecyclePolicy(final String id, final String name, final String schedule, - final String repository, @Nullable Map configuration) { + final String repository, @Nullable final Map configuration, + @Nullable final SnapshotRetentionConfiguration retentionPolicy) { this.id = Objects.requireNonNull(id, "policy id is required"); this.name = Objects.requireNonNull(name, "policy snapshot name is required"); this.schedule = Objects.requireNonNull(schedule, "policy schedule is required"); this.repository = Objects.requireNonNull(repository, "policy snapshot repository is required"); this.configuration = configuration; + this.retentionPolicy = retentionPolicy; } public SnapshotLifecyclePolicy(StreamInput in) throws IOException { @@ -96,6 +103,11 @@ public class SnapshotLifecyclePolicy extends AbstractDiffable PARSER = + new ConstructingObjectParser<>("snapshot_retention", true, a -> { + TimeValue expireAfter = a[0] == null ? 
null : TimeValue.parseTimeValue((String) a[0], EXPIRE_AFTER.getPreferredName()); + Integer minCount = (Integer) a[1]; + Integer maxCount = (Integer) a[2]; + return new SnapshotRetentionConfiguration(expireAfter, minCount, maxCount); + }); + + static { + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), EXPIRE_AFTER); + PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), MINIMUM_SNAPSHOT_COUNT); + PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), MAXIMUM_SNAPSHOT_COUNT); + } + + private final LongSupplier nowSupplier; + private final TimeValue expireAfter; + private final Integer minimumSnapshotCount; + private final Integer maximumSnapshotCount; + + SnapshotRetentionConfiguration(StreamInput in) throws IOException { + nowSupplier = System::currentTimeMillis; + this.expireAfter = in.readOptionalTimeValue(); + this.minimumSnapshotCount = in.readOptionalVInt(); + this.maximumSnapshotCount = in.readOptionalVInt(); + } + + public SnapshotRetentionConfiguration(@Nullable TimeValue expireAfter, + @Nullable Integer minimumSnapshotCount, + @Nullable Integer maximumSnapshotCount) { + this(System::currentTimeMillis, expireAfter, minimumSnapshotCount, maximumSnapshotCount); + } + + public SnapshotRetentionConfiguration(LongSupplier nowSupplier, + @Nullable TimeValue expireAfter, + @Nullable Integer minimumSnapshotCount, + @Nullable Integer maximumSnapshotCount) { + this.nowSupplier = nowSupplier; + this.expireAfter = expireAfter; + this.minimumSnapshotCount = minimumSnapshotCount; + this.maximumSnapshotCount = maximumSnapshotCount; + if (this.minimumSnapshotCount != null && this.minimumSnapshotCount < 1) { + throw new IllegalArgumentException("minimum snapshot count must be at least 1, but was: " + this.minimumSnapshotCount); + } + if (this.maximumSnapshotCount != null && this.maximumSnapshotCount < 1) { + throw new IllegalArgumentException("maximum snapshot count must be at least 1, but was: " + this.maximumSnapshotCount); + } + if ((maximumSnapshotCount != null && minimumSnapshotCount != null) && this.minimumSnapshotCount > this.maximumSnapshotCount) { + throw new IllegalArgumentException("minimum snapshot count " + this.minimumSnapshotCount + + " cannot be larger than maximum snapshot count " + this.maximumSnapshotCount); + } + } + + public static SnapshotRetentionConfiguration parse(XContentParser parser, String name) { + return PARSER.apply(parser, null); + } + + public TimeValue getExpireAfter() { + return this.expireAfter; + } + + public Integer getMinimumSnapshotCount() { + return this.minimumSnapshotCount; + } + + public Integer getMaximumSnapshotCount() { + return this.maximumSnapshotCount; + } + + /** + * Return a predicate by which a SnapshotInfo can be tested to see + * whether it should be deleted according to this retention policy. 
+ * @param allSnapshots a list of all snapshot pertaining to this SLM policy and repository + */ + public Predicate getSnapshotDeletionPredicate(final List allSnapshots) { + final int snapCount = allSnapshots.size(); + List sortedSnapshots = allSnapshots.stream() + .sorted(Comparator.comparingLong(SnapshotInfo::startTime)) + .collect(Collectors.toList()); + + return si -> { + final String snapName = si.snapshotId().getName(); + + // First, enforce the maximum count, if the size is over the maximum number of + // snapshots, then allow the oldest N (where N is the number over the maximum snapshot + // count) snapshots to be eligible for deletion + if (this.maximumSnapshotCount != null) { + if (allSnapshots.size() > this.maximumSnapshotCount) { + int snapsToDelete = allSnapshots.size() - this.maximumSnapshotCount; + boolean eligible = sortedSnapshots.stream() + .limit(snapsToDelete) + .anyMatch(s -> s.equals(si)); + + if (eligible) { + logger.trace("[{}]: ELIGIBLE as it is one of the {} oldest snapshots with " + + "{} total snapshots, over the limit of {} maximum snapshots", + snapName, snapsToDelete, snapCount, this.maximumSnapshotCount); + return true; + } else { + logger.trace("[{}]: INELIGIBLE as it is not one of the {} oldest snapshots with " + + "{} total snapshots, over the limit of {} maximum snapshots", + snapName, snapsToDelete, snapCount, this.maximumSnapshotCount); + return false; + } + } + } + + // Next check the minimum count, since that is a blanket requirement regardless of time, + // if we haven't hit the minimum then we need to keep the snapshot regardless of + // expiration time + if (this.minimumSnapshotCount != null) { + if (allSnapshots.size() <= this.minimumSnapshotCount) { + logger.trace("[{}]: INELIGIBLE as there are {} snapshots and {} minimum snapshots needed", + snapName, snapCount, this.minimumSnapshotCount); + return false; + } + } + + // Finally, check the expiration time of the snapshot, if it is past, then it is + // eligible for deletion + if (this.expireAfter != null) { + TimeValue snapshotAge = new TimeValue(nowSupplier.getAsLong() - si.startTime()); + + if (this.minimumSnapshotCount != null) { + int eligibleForExpiration = snapCount - minimumSnapshotCount; + + // Only the oldest N snapshots are actually eligible, since if we went below this we + // would fall below the configured minimum number of snapshots to keep + Set snapsEligibleForExpiration = sortedSnapshots.stream() + .limit(eligibleForExpiration) + .collect(Collectors.toSet()); + + if (snapsEligibleForExpiration.contains(si) == false) { + // This snapshot is *not* one of the N oldest snapshots, so even if it were + // old enough, the other snapshots would be deleted before it + logger.trace("[{}]: INELIGIBLE as snapshot expiration would pass the " + + "minimum number of configured snapshots ({}) to keep, regardless of age", + snapName, this.minimumSnapshotCount); + return false; + } + } + + if (snapshotAge.compareTo(this.expireAfter) > 0) { + logger.trace("[{}]: ELIGIBLE as snapshot age of {} is older than {}", + snapName, snapshotAge.toHumanReadableString(3), this.expireAfter.toHumanReadableString(3)); + return true; + } else { + logger.trace("[{}]: INELIGIBLE as snapshot age of {} is newer than {}", + snapName, snapshotAge.toHumanReadableString(3), this.expireAfter.toHumanReadableString(3)); + return false; + } + } + // If nothing matched, the snapshot is not eligible for deletion + logger.trace("[{}]: INELIGIBLE as no retention predicates matched", snapName); + return false; + }; + } + + 
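The predicate above applies three rules in order: when the total is over the configured maximum snapshot count, only the oldest surplus snapshots are eligible; while the total is at or below the minimum snapshot count nothing is eligible, regardless of age; and the expire_after age check only applies to snapshots that are not among the newest minimum-count snapshots. The following self-contained sketch models that decision order with plain epoch-millisecond start times instead of SnapshotInfo; the RetentionSketch class and isEligibleForDeletion helper are illustrative names, not part of this patch.

import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

// Hypothetical, standalone model of the SLM retention decision above; it is not the real
// SnapshotRetentionConfiguration API and works on bare snapshot start times.
final class RetentionSketch {

    // Decides whether the snapshot that started at startTime may be deleted, given the start
    // times of every snapshot taken for the same policy and repository.
    static boolean isEligibleForDeletion(long startTime, List<Long> allStartTimes, Long expireAfterMillis,
                                         Integer minCount, Integer maxCount, long nowMillis) {
        List<Long> sorted = allStartTimes.stream().sorted().collect(Collectors.toList());
        int total = sorted.size();

        // Rule 1: over the maximum count -> only the oldest surplus snapshots are eligible.
        if (maxCount != null && total > maxCount) {
            return sorted.subList(0, total - maxCount).contains(startTime);
        }
        // Rule 2: at or below the minimum count -> nothing is eligible, regardless of age.
        if (minCount != null && total <= minCount) {
            return false;
        }
        // Rule 3: the age check only applies to snapshots outside the newest minCount.
        if (expireAfterMillis != null) {
            if (minCount != null && sorted.subList(0, total - minCount).contains(startTime) == false) {
                return false;
            }
            return (nowMillis - startTime) > expireAfterMillis;
        }
        // No rule matched -> keep the snapshot.
        return false;
    }

    public static void main(String[] args) {
        List<Long> starts = Arrays.asList(1_000L, 2_000L, 3_000L, 4_000L);
        // Oldest of four snapshots with a maximum of three: eligible, prints true.
        System.out.println(isEligibleForDeletion(1_000L, starts, 5_000L, 2, 3, 10_000L));
        // Newest snapshot is protected by a minimum of three even though its age exceeds expire_after, prints false.
        System.out.println(isEligibleForDeletion(4_000L, starts, 1_000L, 3, null, 10_000L));
    }
}

Sorting oldest-first up front is the same choice the real predicate makes: it turns both count-based rules into a simple prefix check over the sorted list.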
@Override + public void writeTo(StreamOutput out) throws IOException { + out.writeOptionalTimeValue(this.expireAfter); + out.writeOptionalVInt(this.minimumSnapshotCount); + out.writeOptionalVInt(this.maximumSnapshotCount); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (expireAfter != null) { + builder.field(EXPIRE_AFTER.getPreferredName(), expireAfter.getStringRep()); + } + if (minimumSnapshotCount != null) { + builder.field(MINIMUM_SNAPSHOT_COUNT.getPreferredName(), minimumSnapshotCount); + } + if (maximumSnapshotCount != null) { + builder.field(MAXIMUM_SNAPSHOT_COUNT.getPreferredName(), maximumSnapshotCount); + } + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(expireAfter, minimumSnapshotCount, maximumSnapshotCount); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + SnapshotRetentionConfiguration other = (SnapshotRetentionConfiguration) obj; + return Objects.equals(this.expireAfter, other.expireAfter) && + Objects.equals(minimumSnapshotCount, other.minimumSnapshotCount) && + Objects.equals(maximumSnapshotCount, other.maximumSnapshotCount); + } + + @Override + public String toString() { + return Strings.toString(this); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/action/GetSnapshotLifecycleAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/action/GetSnapshotLifecycleAction.java index 96d7c19f56f..5821f19fc9b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/action/GetSnapshotLifecycleAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/action/GetSnapshotLifecycleAction.java @@ -94,6 +94,10 @@ public class GetSnapshotLifecycleAction extends ActionType getPolicies() { + return this.lifecycles; + } + @Override public String toString() { return Strings.toString(this); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/action/GetSnapshotLifecycleStatsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/action/GetSnapshotLifecycleStatsAction.java new file mode 100644 index 00000000000..ff37feb1164 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/action/GetSnapshotLifecycleStatsAction.java @@ -0,0 +1,102 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.slm.action; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.support.master.AcknowledgedRequest; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.xpack.slm.SnapshotLifecycleStats; +import java.io.IOException; +import java.util.Objects; + +/** + * This class represents the action of retrieving the stats for snapshot lifecycle management.
+ * These are retrieved from the master's cluster state and contain numbers related to the count of + * snapshots taken or deleted, as well as retention runs and time spent deleting snapshots. + */ +public class GetSnapshotLifecycleStatsAction extends ActionType { + public static final GetSnapshotLifecycleStatsAction INSTANCE = new GetSnapshotLifecycleStatsAction(); + public static final String NAME = "cluster:admin/slm/stats"; + + protected GetSnapshotLifecycleStatsAction() { + super(NAME, GetSnapshotLifecycleStatsAction.Response::new); + } + + public static class Request extends AcknowledgedRequest { + + public Request() { } + + public Request(StreamInput in) throws IOException { + super(in); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + } + } + + public static class Response extends ActionResponse implements ToXContentObject { + + private SnapshotLifecycleStats slmStats; + + public Response() { } + + public Response(SnapshotLifecycleStats slmStats) { + this.slmStats = slmStats; + } + + public Response(StreamInput in) throws IOException { + this.slmStats = new SnapshotLifecycleStats(in); + } + + @Override + public String toString() { + return Strings.toString(this); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return this.slmStats.toXContent(builder, params); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + this.slmStats.writeTo(out); + } + + @Override + public int hashCode() { + return Objects.hash(this.slmStats); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + GetSnapshotLifecycleStatsAction.Response other = (GetSnapshotLifecycleStatsAction.Response) obj; + return this.slmStats.equals(other.slmStats); + } + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/history/SnapshotHistoryItem.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/history/SnapshotHistoryItem.java index 8bd51e88704..380eaa8a651 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/history/SnapshotHistoryItem.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/history/SnapshotHistoryItem.java @@ -15,7 +15,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ConstructingObjectParser; -import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; @@ -40,7 +39,10 @@ public class SnapshotHistoryItem implements Writeable, ToXContentObject { static final ParseField SNAPSHOT_NAME = new ParseField("snapshot_name"); static final ParseField OPERATION = new ParseField("operation"); static final ParseField SUCCESS = new ParseField("success"); - private static final String CREATE_OPERATION = "CREATE"; + + public static final String CREATE_OPERATION = "CREATE"; + public static final String DELETE_OPERATION = "DELETE"; + protected final long timestamp; protected final String policyId; protected final String repository; @@ -98,25 +100,34 @@ public class 
SnapshotHistoryItem implements Writeable, ToXContentObject { this.errorDetails = errorDetails; } - public static SnapshotHistoryItem successRecord(long timestamp, SnapshotLifecyclePolicy policy, String snapshotName) { + public static SnapshotHistoryItem creationSuccessRecord(long timestamp, SnapshotLifecyclePolicy policy, String snapshotName) { return new SnapshotHistoryItem(timestamp, policy.getId(), policy.getRepository(), snapshotName, CREATE_OPERATION, true, policy.getConfig(), null); } - public static SnapshotHistoryItem failureRecord(long timeStamp, SnapshotLifecyclePolicy policy, String snapshotName, - Exception exception) throws IOException { - ToXContent.Params stacktraceParams = new ToXContent.MapParams(Collections.singletonMap(REST_EXCEPTION_SKIP_STACK_TRACE, "false")); - String exceptionString; - try (XContentBuilder causeXContentBuilder = JsonXContent.contentBuilder()) { - causeXContentBuilder.startObject(); - ElasticsearchException.generateThrowableXContent(causeXContentBuilder, stacktraceParams, exception); - causeXContentBuilder.endObject(); - exceptionString = BytesReference.bytes(causeXContentBuilder).utf8ToString(); - } + public static SnapshotHistoryItem creationFailureRecord(long timeStamp, SnapshotLifecyclePolicy policy, String snapshotName, + Exception exception) throws IOException { + String exceptionString = exceptionToString(exception); return new SnapshotHistoryItem(timeStamp, policy.getId(), policy.getRepository(), snapshotName, CREATE_OPERATION, false, policy.getConfig(), exceptionString); } + public static SnapshotHistoryItem deletionSuccessRecord(long timestamp, String snapshotName, String policyId, String repository) { + return new SnapshotHistoryItem(timestamp, policyId, repository, snapshotName, DELETE_OPERATION, true, null, null); + } + + public static SnapshotHistoryItem deletionPossibleSuccessRecord(long timestamp, String snapshotName, String policyId, String repository, + String details) { + return new SnapshotHistoryItem(timestamp, policyId, repository, snapshotName, DELETE_OPERATION, true, null, details); + } + + public static SnapshotHistoryItem deletionFailureRecord(long timestamp, String snapshotName, String policyId, String repository, + Exception exception) throws IOException { + String exceptionString = exceptionToString(exception); + return new SnapshotHistoryItem(timestamp, policyId, repository, snapshotName, DELETE_OPERATION, false, + null, exceptionString); + } + public SnapshotHistoryItem(StreamInput in) throws IOException { this.timestamp = in.readVLong(); this.policyId = in.readString(); @@ -220,4 +231,16 @@ public class SnapshotHistoryItem implements Writeable, ToXContentObject { public String toString() { return Strings.toString(this); } + + private static String exceptionToString(Exception exception) throws IOException { + Params stacktraceParams = new MapParams(Collections.singletonMap(REST_EXCEPTION_SKIP_STACK_TRACE, "false")); + String exceptionString; + try (XContentBuilder causeXContentBuilder = JsonXContent.contentBuilder()) { + causeXContentBuilder.startObject(); + ElasticsearchException.generateThrowableXContent(causeXContentBuilder, stacktraceParams, exception); + causeXContentBuilder.endObject(); + exceptionString = BytesReference.bytes(causeXContentBuilder).utf8ToString(); + } + return exceptionString; + } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLService.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLService.java index 3a9e9892d08..21ac0f228ae 100644 
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLService.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLService.java @@ -101,7 +101,7 @@ public class SSLService { private final Map sslConfigurations; /** - * A mapping from a SSLConfiguration to a pre-built context. + * A mapping from an SSLConfiguration to a pre-built context. *
<p>
    * This is managed separately to the {@link #sslConfigurations} map, so that a single configuration (by object equality) * always maps to the same {@link SSLContextHolder}, even if it is being used within a different context-name. @@ -253,7 +253,7 @@ public class SSLService { String[] supportedProtocols = configuration.supportedProtocols().toArray(Strings.EMPTY_ARRAY); SSLParameters parameters = new SSLParameters(ciphers, supportedProtocols); if (configuration.verificationMode().isHostnameVerificationEnabled() && host != null) { - // By default, a SSLEngine will not perform hostname verification. In order to perform hostname verification + // By default, an SSLEngine will not perform hostname verification. In order to perform hostname verification // we need to specify a EndpointIdentificationAlgorithm. We use the HTTPS algorithm to prevent against // man in the middle attacks for all of our connections. parameters.setEndpointIdentificationAlgorithm("HTTPS"); @@ -303,7 +303,7 @@ public class SSLService { Objects.requireNonNull(sslConfiguration, "SSL Configuration cannot be null"); SSLContextHolder holder = sslContexts.get(sslConfiguration); if (holder == null) { - throw new IllegalArgumentException("did not find a SSLContext for [" + sslConfiguration.toString() + "]"); + throw new IllegalArgumentException("did not find an SSLContext for [" + sslConfiguration.toString() + "]"); } return holder; } @@ -636,7 +636,7 @@ public class SSLService { while (sessionIds.hasMoreElements()) { byte[] sessionId = sessionIds.nextElement(); SSLSession session = sslSessionContext.getSession(sessionId); - // a SSLSession could be null as there is no lock while iterating, the session cache + // an SSLSession could be null as there is no lock while iterating, the session cache // could have evicted a value, the session could be timed out, or the session could // have already been invalidated, which removes the value from the session cache in the // sun implementation diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/DataFrameFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/TransformFeatureSetUsage.java similarity index 77% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/DataFrameFeatureSetUsage.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/TransformFeatureSetUsage.java index 481a367fcf3..2cd3864f673 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/DataFrameFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/TransformFeatureSetUsage.java @@ -4,35 +4,35 @@ * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.core.dataframe; +package org.elasticsearch.xpack.core.transform; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.xpack.core.XPackFeatureSet.Usage; +import org.elasticsearch.xpack.core.transform.transforms.TransformIndexerStats; import org.elasticsearch.xpack.core.XPackField; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerTransformStats; import java.io.IOException; import java.util.Map; import java.util.Map.Entry; import java.util.Objects; -public class DataFrameFeatureSetUsage extends Usage { +public class TransformFeatureSetUsage extends Usage { private final Map transformCountByState; - private final DataFrameIndexerTransformStats accumulatedStats; + private final TransformIndexerStats accumulatedStats; - public DataFrameFeatureSetUsage(StreamInput in) throws IOException { + public TransformFeatureSetUsage(StreamInput in) throws IOException { super(in); this.transformCountByState = in.readMap(StreamInput::readString, StreamInput::readLong); - this.accumulatedStats = new DataFrameIndexerTransformStats(in); + this.accumulatedStats = new TransformIndexerStats(in); } - public DataFrameFeatureSetUsage(boolean available, boolean enabled, Map transformCountByState, - DataFrameIndexerTransformStats accumulatedStats) { - super(XPackField.DATA_FRAME, available, enabled); + public TransformFeatureSetUsage(boolean available, boolean enabled, Map transformCountByState, + TransformIndexerStats accumulatedStats) { + super(XPackField.TRANSFORM, available, enabled); this.transformCountByState = Objects.requireNonNull(transformCountByState); this.accumulatedStats = Objects.requireNonNull(accumulatedStats); } @@ -48,7 +48,7 @@ public class DataFrameFeatureSetUsage extends Usage { protected void innerXContent(XContentBuilder builder, Params params) throws IOException { super.innerXContent(builder, params); if (transformCountByState.isEmpty() == false) { - builder.startObject(DataFrameField.TRANSFORMS.getPreferredName()); + builder.startObject(TransformField.TRANSFORMS.getPreferredName()); long all = 0L; for (Entry entry : transformCountByState.entrySet()) { builder.field(entry.getKey(), entry.getValue()); @@ -58,7 +58,7 @@ public class DataFrameFeatureSetUsage extends Usage { builder.endObject(); // if there are no transforms, do not show any stats - builder.field(DataFrameField.STATS_FIELD.getPreferredName(), accumulatedStats); + builder.field(TransformField.STATS_FIELD.getPreferredName(), accumulatedStats); } } @@ -75,7 +75,7 @@ public class DataFrameFeatureSetUsage extends Usage { if (getClass() != obj.getClass()) { return false; } - DataFrameFeatureSetUsage other = (DataFrameFeatureSetUsage) obj; + TransformFeatureSetUsage other = (TransformFeatureSetUsage) obj; return Objects.equals(name, other.name) && available == other.available && enabled == other.enabled && Objects.equals(transformCountByState, other.transformCountByState) && Objects.equals(accumulatedStats, other.accumulatedStats); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/DataFrameField.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/TransformField.java similarity index 94% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/DataFrameField.java rename to 
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/TransformField.java index 56867ee9029..3e09577de4c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/DataFrameField.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/TransformField.java @@ -4,14 +4,14 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.core.dataframe; +package org.elasticsearch.xpack.core.transform; import org.elasticsearch.common.ParseField; /* * Utility class to hold common fields and strings for data frame. */ -public final class DataFrameField { +public final class TransformField { // common parse fields public static final ParseField AGGREGATIONS = new ParseField("aggregations"); @@ -65,12 +65,12 @@ public final class DataFrameField { public static final String PERSISTENT_TASK_DESCRIPTION_PREFIX = "data_frame_"; // strings for meta information - public static final String META_FIELDNAME = "_data_frame"; + public static final String META_FIELDNAME = "_transform"; public static final String CREATION_DATE_MILLIS = "creation_date_in_millis"; public static final String CREATED = "created"; public static final String CREATED_BY = "created_by"; public static final String TRANSFORM = "transform"; - public static final String DATA_FRAME_SIGNATURE = "data-frame-transform"; + public static final String TRANSFORM_SIGNATURE = "transform"; /** * Parameter to indicate whether we are serialising to X Content for internal storage. Default the field is invisible (e.g. for get @@ -81,6 +81,6 @@ public final class DataFrameField { // internal document id public static String DOCUMENT_ID_FIELD = "_id"; - private DataFrameField() { + private TransformField() { } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/TransformMessages.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/TransformMessages.java new file mode 100644 index 00000000000..3bd18e1c283 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/TransformMessages.java @@ -0,0 +1,107 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */
+
+package org.elasticsearch.xpack.core.transform;
+
+import java.text.MessageFormat;
+import java.util.Locale;
+
+public class TransformMessages {
+
+    public static final String REST_STOP_TRANSFORM_WAIT_FOR_COMPLETION_TIMEOUT =
+        "Timed out after [{0}] while waiting for transform [{1}] to stop";
+    public static final String REST_STOP_TRANSFORM_WAIT_FOR_COMPLETION_INTERRUPT =
+        "Interrupted while waiting for transform [{0}] to stop";
+    public static final String REST_PUT_TRANSFORM_EXISTS = "Transform with id [{0}] already exists";
+    public static final String REST_UNKNOWN_TRANSFORM = "Transform with id [{0}] could not be found";
+    public static final String REST_PUT_TRANSFORM_FAILED_TO_VALIDATE_CONFIGURATION =
+        "Failed to validate configuration";
+    public static final String REST_PUT_FAILED_PERSIST_TRANSFORM_CONFIGURATION = "Failed to persist transform configuration";
+    public static final String REST_PUT_TRANSFORM_FAILED_TO_DEDUCE_DEST_MAPPINGS = "Failed to deduce dest mappings";
+    public static final String REST_PUT_TRANSFORM_SOURCE_INDEX_MISSING = "Source index [{0}] does not exist";
+    public static final String REST_PUT_TRANSFORM_DEST_IN_SOURCE = "Destination index [{0}] is included in source expression [{1}]";
+    public static final String REST_PUT_TRANSFORM_DEST_SINGLE_INDEX = "Destination index [{0}] should refer to a single index";
+    public static final String REST_PUT_TRANSFORM_INCONSISTENT_ID =
+        "Inconsistent id; ''{0}'' specified in the body differs from ''{1}'' specified as a URL argument";
+    public static final String TRANSFORM_CONFIG_INVALID = "Transform configuration is invalid [{0}]";
+    public static final String REST_FAILED_TO_SERIALIZE_TRANSFORM = "Failed to serialise transform [{0}]";
+    public static final String TRANSFORM_FAILED_TO_PERSIST_STATS = "Failed to persist transform statistics for transform [{0}]";
+    public static final String UNKNOWN_TRANSFORM_STATS = "Statistics for transform [{0}] could not be found";
+
+    public static final String CANNOT_STOP_FAILED_TRANSFORM =
+        "Unable to stop transform [{0}] as it is in a failed state with reason [{1}]." +
+        " Use force stop to stop the transform.";
+    public static final String CANNOT_START_FAILED_TRANSFORM =
+        "Unable to start transform [{0}] as it is in a failed state with failure: [{1}]. " +
+        "Use force stop and then restart the transform once error is resolved.";
+
+    public static final String FAILED_TO_CREATE_DESTINATION_INDEX = "Could not create destination index [{0}] for transform [{1}]";
+    public static final String FAILED_TO_RELOAD_TRANSFORM_CONFIGURATION =
+        "Failed to reload transform configuration for transform [{0}]";
+    public static final String FAILED_TO_LOAD_TRANSFORM_CONFIGURATION =
+        "Failed to load transform configuration for transform [{0}]";
+    public static final String FAILED_TO_PARSE_TRANSFORM_CONFIGURATION =
+        "Failed to parse transform configuration for transform [{0}]";
+    public static final String FAILED_TO_PARSE_TRANSFORM_STATISTICS_CONFIGURATION =
+        "Failed to parse transform statistics for transform [{0}]";
+    public static final String FAILED_TO_LOAD_TRANSFORM_CHECKPOINT =
+        "Failed to load transform checkpoint for transform [{0}]";
+    public static final String FAILED_TO_LOAD_TRANSFORM_STATE =
+        "Failed to load transform state for transform [{0}]";
+    public static final String TRANSFORM_CONFIGURATION_NO_TRANSFORM =
+        "Transform configuration must specify exactly 1 function";
+    public static final String TRANSFORM_CONFIGURATION_PIVOT_NO_GROUP_BY =
+        "Pivot transform configuration must specify at least 1 group_by";
+    public static final String TRANSFORM_CONFIGURATION_PIVOT_NO_AGGREGATION =
+        "Pivot transform configuration must specify at least 1 aggregation";
+    public static final String TRANSFORM_PIVOT_FAILED_TO_CREATE_COMPOSITE_AGGREGATION =
+        "Failed to create composite aggregation from pivot function";
+    public static final String TRANSFORM_CONFIGURATION_INVALID =
+        "Transform configuration [{0}] has invalid elements";
+    public static final String UNABLE_TO_GATHER_FIELD_MAPPINGS = "Failed to gather field mappings for index [{0}]";
+    public static final String TRANSFORM_UPDATE_CANNOT_CHANGE_SYNC_METHOD =
+        "Cannot change the current sync configuration of transform [{0}] from [{1}] to [{2}]";
+    public static final String LOG_TRANSFORM_CONFIGURATION_BAD_QUERY =
+        "Failed to parse query for transform";
+    public static final String LOG_TRANSFORM_CONFIGURATION_BAD_GROUP_BY =
+        "Failed to parse group_by for pivot transform";
+    public static final String LOG_TRANSFORM_CONFIGURATION_BAD_AGGREGATION =
+        "Failed to parse aggregation for pivot transform";
+    public static final String LOG_TRANSFORM_PIVOT_REDUCE_PAGE_SIZE =
+        "Insufficient memory for search, reducing number of buckets per search from [{0}] to [{1}]";
+    public static final String LOG_TRANSFORM_PIVOT_LOW_PAGE_SIZE_FAILURE =
+        "Insufficient memory for search after repeated page size reductions to [{0}], unable to continue pivot, " +
+        "please simplify job or increase heap size on data nodes.";
+
+    public static final String FAILED_TO_PARSE_TRANSFORM_CHECKPOINTS =
+        "Failed to parse transform checkpoints for [{0}]";
+
+
+    public static final String ID_TOO_LONG = "The id cannot contain more than {0} characters.";
+    public static final String INVALID_ID = "Invalid {0}; ''{1}'' can contain lowercase alphanumeric (a-z and 0-9), hyphens or " +
+        "underscores; must start and end with alphanumeric";
+    private TransformMessages() {
+    }
+
+    /**
+     * Returns the message parameter
+     *
+     * @param message Should be one of the statics defined in this class
+     */
+    public static String getMessage(String message) {
+        return message;
+    }
+
+    /**
+     * Format the message with the supplied arguments
+     *
+     * @param message Should be one of the statics defined in this class
+     * @param args MessageFormat arguments.
See {@linkplain MessageFormat#format(Object)}] + */ + public static String getMessage(String message, Object... args) { + return new MessageFormat(message, Locale.ROOT).format(args); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/DataFrameNamedXContentProvider.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/TransformNamedXContentProvider.java similarity index 69% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/DataFrameNamedXContentProvider.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/TransformNamedXContentProvider.java index 9eacfc5ff1e..2dfc6d7f713 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/DataFrameNamedXContentProvider.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/TransformNamedXContentProvider.java @@ -4,23 +4,23 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.core.dataframe; +package org.elasticsearch.xpack.core.transform; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.plugins.spi.NamedXContentProvider; -import org.elasticsearch.xpack.core.dataframe.transforms.SyncConfig; -import org.elasticsearch.xpack.core.dataframe.transforms.TimeSyncConfig; +import org.elasticsearch.xpack.core.transform.transforms.SyncConfig; +import org.elasticsearch.xpack.core.transform.transforms.TimeSyncConfig; import java.util.Arrays; import java.util.List; -public class DataFrameNamedXContentProvider implements NamedXContentProvider { +public class TransformNamedXContentProvider implements NamedXContentProvider { @Override public List getNamedXContentParsers() { return Arrays.asList( new NamedXContentRegistry.Entry(SyncConfig.class, - DataFrameField.TIME_BASED_SYNC, + TransformField.TIME_BASED_SYNC, TimeSyncConfig::parse)); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/DeleteDataFrameTransformAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/DeleteTransformAction.java similarity index 82% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/DeleteDataFrameTransformAction.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/DeleteTransformAction.java index 19e2cd77704..9ca809c3974 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/DeleteDataFrameTransformAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/DeleteTransformAction.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.core.dataframe.action; +package org.elasticsearch.xpack.core.transform.action; import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequestValidationException; @@ -12,18 +12,18 @@ import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.xpack.core.dataframe.DataFrameField; -import org.elasticsearch.xpack.core.dataframe.utils.ExceptionsHelper; +import org.elasticsearch.xpack.core.transform.TransformField; +import org.elasticsearch.xpack.core.transform.utils.ExceptionsHelper; import java.io.IOException; import java.util.Objects; -public class DeleteDataFrameTransformAction extends ActionType { +public class DeleteTransformAction extends ActionType { - public static final DeleteDataFrameTransformAction INSTANCE = new DeleteDataFrameTransformAction(); + public static final DeleteTransformAction INSTANCE = new DeleteTransformAction(); public static final String NAME = "cluster:admin/data_frame/delete"; - private DeleteDataFrameTransformAction() { + private DeleteTransformAction() { super(NAME, AcknowledgedResponse::new); } @@ -32,7 +32,7 @@ public class DeleteDataFrameTransformAction extends ActionType { +public class GetTransformsAction extends ActionType { - public static final GetDataFrameTransformsAction INSTANCE = new GetDataFrameTransformsAction(); + public static final GetTransformsAction INSTANCE = new GetTransformsAction(); public static final String NAME = "cluster:monitor/data_frame/get"; private static final DeprecationLogger deprecationLogger = new DeprecationLogger( - LogManager.getLogger(GetDataFrameTransformsAction.class)); + LogManager.getLogger(GetTransformsAction.class)); - private GetDataFrameTransformsAction() { - super(NAME, GetDataFrameTransformsAction.Response::new); + private GetTransformsAction() { + super(NAME, GetTransformsAction.Response::new); } public static class Request extends AbstractGetResourcesRequest { @@ -72,17 +72,17 @@ public class GetDataFrameTransformsAction extends ActionType implements Writeable, ToXContentObject { + public static class Response extends AbstractGetResourcesResponse implements Writeable, ToXContentObject { public static final String INVALID_TRANSFORMS_DEPRECATION_WARNING = "Found [{}] invalid transforms"; private static final ParseField INVALID_TRANSFORMS = new ParseField("invalid_transforms"); - public Response(List transformConfigs, long count) { - super(new QueryPage<>(transformConfigs, count, DataFrameField.TRANSFORMS)); + public Response(List transformConfigs, long count) { + super(new QueryPage<>(transformConfigs, count, TransformField.TRANSFORMS)); } public Response() { @@ -93,7 +93,7 @@ public class GetDataFrameTransformsAction extends ActionType getTransformConfigurations() { + public List getTransformConfigurations() { return getResources().results(); } @@ -101,11 +101,11 @@ public class GetDataFrameTransformsAction extends ActionType invalidTransforms = new ArrayList<>(); builder.startObject(); - builder.field(DataFrameField.COUNT.getPreferredName(), getResources().count()); + builder.field(TransformField.COUNT.getPreferredName(), getResources().count()); // XContentBuilder does not support passing the params object for Iterables - builder.field(DataFrameField.TRANSFORMS.getPreferredName()); + builder.field(TransformField.TRANSFORMS.getPreferredName()); 
builder.startArray(); - for (DataFrameTransformConfig configResponse : getResources().results()) { + for (TransformConfig configResponse : getResources().results()) { configResponse.toXContent(builder, params); if (configResponse.isValid() == false) { invalidTransforms.add(configResponse.getId()); @@ -114,8 +114,8 @@ public class GetDataFrameTransformsAction extends ActionType getReader() { - return DataFrameTransformConfig::new; + protected Reader getReader() { + return TransformConfig::new; } } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/GetDataFrameTransformsStatsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/GetTransformsStatsAction.java similarity index 83% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/GetDataFrameTransformsStatsAction.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/GetTransformsStatsAction.java index 00170df7b15..a83f6cb53ba 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/GetDataFrameTransformsStatsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/GetTransformsStatsAction.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.core.dataframe.action; +package org.elasticsearch.xpack.core.transform.action; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; @@ -22,9 +22,9 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.tasks.Task; import org.elasticsearch.xpack.core.action.util.PageParams; import org.elasticsearch.xpack.core.action.util.QueryPage; -import org.elasticsearch.xpack.core.dataframe.DataFrameField; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformStats; -import org.elasticsearch.xpack.core.dataframe.utils.ExceptionsHelper; +import org.elasticsearch.xpack.core.transform.TransformField; +import org.elasticsearch.xpack.core.transform.transforms.TransformStats; +import org.elasticsearch.xpack.core.transform.utils.ExceptionsHelper; import java.io.IOException; import java.util.ArrayList; @@ -34,12 +34,12 @@ import java.util.Objects; import static org.elasticsearch.action.ValidateActions.addValidationError; -public class GetDataFrameTransformsStatsAction extends ActionType { +public class GetTransformsStatsAction extends ActionType { - public static final GetDataFrameTransformsStatsAction INSTANCE = new GetDataFrameTransformsStatsAction(); + public static final GetTransformsStatsAction INSTANCE = new GetTransformsStatsAction(); public static final String NAME = "cluster:monitor/data_frame/stats/get"; - public GetDataFrameTransformsStatsAction() { - super(NAME, GetDataFrameTransformsStatsAction.Response::new); + public GetTransformsStatsAction() { + super(NAME, GetTransformsStatsAction.Response::new); } public static class Request extends BaseTasksRequest { @@ -74,7 +74,7 @@ public class GetDataFrameTransformsStatsAction extends ActionType task.getDescription().equals(DataFrameField.PERSISTENT_TASK_DESCRIPTION_PREFIX + transformId)); + .anyMatch(transformId -> task.getDescription().equals(TransformField.PERSISTENT_TASK_DESCRIPTION_PREFIX + transformId)); } public String getId() { @@ -147,24 +147,24 @@ public class GetDataFrameTransformsStatsAction extends ActionType transformsStats; + private final QueryPage transformsStats; 
- public Response(List transformStateAndStats, long count) { - this(new QueryPage<>(transformStateAndStats, count, DataFrameField.TRANSFORMS)); + public Response(List transformStateAndStats, long count) { + this(new QueryPage<>(transformStateAndStats, count, TransformField.TRANSFORMS)); } - public Response(List transformStateAndStats, + public Response(List transformStateAndStats, long count, List taskFailures, List nodeFailures) { - this(new QueryPage<>(transformStateAndStats, count, DataFrameField.TRANSFORMS), taskFailures, nodeFailures); + this(new QueryPage<>(transformStateAndStats, count, TransformField.TRANSFORMS), taskFailures, nodeFailures); } - private Response(QueryPage transformsStats) { + private Response(QueryPage transformsStats) { this(transformsStats, Collections.emptyList(), Collections.emptyList()); } - private Response(QueryPage transformsStats, + private Response(QueryPage transformsStats, List taskFailures, List nodeFailures) { super(taskFailures, nodeFailures); @@ -174,14 +174,14 @@ public class GetDataFrameTransformsStatsAction extends ActionType(in, DataFrameTransformStats::new); + transformsStats = new QueryPage<>(in, TransformStats::new); } else { - List stats = in.readList(DataFrameTransformStats::new); - transformsStats = new QueryPage<>(stats, stats.size(), DataFrameField.TRANSFORMS); + List stats = in.readList(TransformStats::new); + transformsStats = new QueryPage<>(stats, stats.size(), TransformField.TRANSFORMS); } } - public List getTransformsStats() { + public List getTransformsStats() { return transformsStats.results(); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/PreviewDataFrameTransformAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/PreviewTransformAction.java similarity index 88% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/PreviewDataFrameTransformAction.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/PreviewTransformAction.java index d30db53746f..0a56c9481a0 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/PreviewDataFrameTransformAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/PreviewTransformAction.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.core.dataframe.action; +package org.elasticsearch.xpack.core.transform.action; import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequestValidationException; @@ -22,9 +22,9 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.xpack.core.dataframe.DataFrameField; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; -import org.elasticsearch.xpack.core.dataframe.transforms.DestConfig; +import org.elasticsearch.xpack.core.transform.TransformField; +import org.elasticsearch.xpack.core.transform.transforms.TransformConfig; +import org.elasticsearch.xpack.core.transform.transforms.DestConfig; import java.io.IOException; import java.util.ArrayList; @@ -36,26 +36,26 @@ import java.util.Objects; import static org.elasticsearch.action.ValidateActions.addValidationError; -public class PreviewDataFrameTransformAction extends ActionType { +public class PreviewTransformAction extends ActionType { - public static final PreviewDataFrameTransformAction INSTANCE = new PreviewDataFrameTransformAction(); + public static final PreviewTransformAction INSTANCE = new PreviewTransformAction(); public static final String NAME = "cluster:admin/data_frame/preview"; - private PreviewDataFrameTransformAction() { - super(NAME, PreviewDataFrameTransformAction.Response::new); + private PreviewTransformAction() { + super(NAME, PreviewTransformAction.Response::new); } public static class Request extends AcknowledgedRequest implements ToXContentObject { - private final DataFrameTransformConfig config; + private final TransformConfig config; - public Request(DataFrameTransformConfig config) { + public Request(TransformConfig config) { this.config = config; } public Request(StreamInput in) throws IOException { super(in); - this.config = new DataFrameTransformConfig(in); + this.config = new TransformConfig(in); } public static Request fromXContent(final XContentParser parser) throws IOException { @@ -64,7 +64,7 @@ public class PreviewDataFrameTransformAction extends ActionType tempDestination = new HashMap<>(); tempDestination.put(DestConfig.INDEX.getPreferredName(), "unused-transform-preview-index"); // Users can still provide just dest.pipeline to preview what their data would look like given the pipeline ID - Object providedDestination = content.get(DataFrameField.DESTINATION.getPreferredName()); + Object providedDestination = content.get(TransformField.DESTINATION.getPreferredName()); if (providedDestination instanceof Map) { @SuppressWarnings("unchecked") Map destMap = (Map)providedDestination; @@ -73,15 +73,15 @@ public class PreviewDataFrameTransformAction extends ActionType { +public class PutTransformAction extends ActionType { - public static final PutDataFrameTransformAction INSTANCE = new PutDataFrameTransformAction(); + public static final PutTransformAction INSTANCE = new PutTransformAction(); public static final String NAME = "cluster:admin/data_frame/put"; private static final TimeValue MIN_FREQUENCY = TimeValue.timeValueSeconds(1); private static final TimeValue MAX_FREQUENCY = TimeValue.timeValueHours(1); - private PutDataFrameTransformAction() { + private PutTransformAction() { super(NAME, AcknowledgedResponse::new); } public static class Request extends AcknowledgedRequest { - private final DataFrameTransformConfig 
config; + private final TransformConfig config; private final boolean deferValidation; - public Request(DataFrameTransformConfig config, boolean deferValidation) { + public Request(TransformConfig config, boolean deferValidation) { this.config = config; this.deferValidation = deferValidation; } public Request(StreamInput in) throws IOException { super(in); - this.config = new DataFrameTransformConfig(in); + this.config = new TransformConfig(in); if (in.getVersion().onOrAfter(Version.V_7_4_0)) { this.deferValidation = in.readBoolean(); } else { @@ -61,12 +61,12 @@ public class PutDataFrameTransformAction extends ActionType 0) { validationException = addValidationError( - "highest permitted [" + DataFrameField.FREQUENCY + "] is [" + MAX_FREQUENCY.getStringRep() + "]", + "highest permitted [" + TransformField.FREQUENCY + "] is [" + MAX_FREQUENCY.getStringRep() + "]", validationException); } } @@ -117,7 +117,7 @@ public class PutDataFrameTransformAction extends ActionType { +public class StartTransformAction extends ActionType { - public static final StartDataFrameTransformAction INSTANCE = new StartDataFrameTransformAction(); + public static final StartTransformAction INSTANCE = new StartTransformAction(); public static final String NAME = "cluster:admin/data_frame/start"; - private StartDataFrameTransformAction() { - super(NAME, StartDataFrameTransformAction.Response::new); + private StartTransformAction() { + super(NAME, StartTransformAction.Response::new); } public static class Request extends AcknowledgedRequest { private final String id; - private final boolean force; - public Request(String id, boolean force) { - this.id = ExceptionsHelper.requireNonNull(id, DataFrameField.ID.getPreferredName()); - this.force = force; + public Request(String id) { + this.id = ExceptionsHelper.requireNonNull(id, TransformField.ID.getPreferredName()); } public Request(StreamInput in) throws IOException { super(in); id = in.readString(); - force = in.readBoolean(); + if(in.getVersion().before(Version.V_7_5_0)) { + in.readBoolean(); + } } public String getId() { return id; } - public boolean isForce() { - return force; - } - @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeString(id); - out.writeBoolean(force); + if(out.getVersion().before(Version.V_7_5_0)) { + out.writeBoolean(false); + } } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StopDataFrameTransformAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/StopTransformAction.java similarity index 90% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StopDataFrameTransformAction.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/StopTransformAction.java index eef244551a3..4fd3ce7f54d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StopDataFrameTransformAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/StopTransformAction.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.core.dataframe.action; +package org.elasticsearch.xpack.core.transform.action; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; @@ -20,8 +20,8 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.tasks.Task; -import org.elasticsearch.xpack.core.dataframe.DataFrameField; -import org.elasticsearch.xpack.core.dataframe.utils.ExceptionsHelper; +import org.elasticsearch.xpack.core.transform.TransformField; +import org.elasticsearch.xpack.core.transform.utils.ExceptionsHelper; import java.io.IOException; import java.util.Arrays; @@ -32,15 +32,15 @@ import java.util.Objects; import java.util.Set; import java.util.concurrent.TimeUnit; -public class StopDataFrameTransformAction extends ActionType { +public class StopTransformAction extends ActionType { - public static final StopDataFrameTransformAction INSTANCE = new StopDataFrameTransformAction(); + public static final StopTransformAction INSTANCE = new StopTransformAction(); public static final String NAME = "cluster:admin/data_frame/stop"; public static final TimeValue DEFAULT_TIMEOUT = new TimeValue(30, TimeUnit.SECONDS); - private StopDataFrameTransformAction() { - super(NAME, StopDataFrameTransformAction.Response::new); + private StopTransformAction() { + super(NAME, StopTransformAction.Response::new); } public static class Request extends BaseTasksRequest { @@ -51,7 +51,7 @@ public class StopDataFrameTransformAction extends ActionType expandedIds; public Request(String id, boolean waitForCompletion, boolean force, @Nullable TimeValue timeout, boolean allowNoMatch) { - this.id = ExceptionsHelper.requireNonNull(id, DataFrameField.ID.getPreferredName()); + this.id = ExceptionsHelper.requireNonNull(id, TransformField.ID.getPreferredName()); this.waitForCompletion = waitForCompletion; this.force = force; @@ -151,8 +151,8 @@ public class StopDataFrameTransformAction extends ActionType { +public class UpdateTransformAction extends ActionType { - public static final UpdateDataFrameTransformAction INSTANCE = new UpdateDataFrameTransformAction(); + public static final UpdateTransformAction INSTANCE = new UpdateTransformAction(); public static final String NAME = "cluster:admin/data_frame/update"; private static final TimeValue MIN_FREQUENCY = TimeValue.timeValueSeconds(1); private static final TimeValue MAX_FREQUENCY = TimeValue.timeValueHours(1); - private UpdateDataFrameTransformAction() { + private UpdateTransformAction() { super(NAME, Response::new); } public static class Request extends AcknowledgedRequest { - private final DataFrameTransformConfigUpdate update; + private final TransformConfigUpdate update; private final String id; private final boolean deferValidation; - public Request(DataFrameTransformConfigUpdate update, String id, boolean deferValidation) { + public Request(TransformConfigUpdate update, String id, boolean deferValidation) { this.update = update; this.id = id; this.deferValidation = deferValidation; @@ -54,18 +54,18 @@ public class UpdateDataFrameTransformAction extends ActionType 0) { validationException = addValidationError( - "highest permitted [" + DataFrameField.FREQUENCY + "] is [" + MAX_FREQUENCY.getStringRep() + "]", + "highest permitted [" + TransformField.FREQUENCY + "] is [" + MAX_FREQUENCY.getStringRep() + "]", validationException); } } @@ -105,7 +105,7 @@ public class UpdateDataFrameTransformAction extends 
ActionType PARSER = - createParser("data_frame_audit_message", DataFrameAuditMessage::new, TRANSFORM_ID); + private static final ParseField TRANSFORM_ID = new ParseField(TransformField.TRANSFORM_ID); + public static final ConstructingObjectParser PARSER = + createParser("data_frame_audit_message", TransformAuditMessage::new, TRANSFORM_ID); - public DataFrameAuditMessage(String resourceId, String message, Level level, Date timestamp, String nodeName) { + public TransformAuditMessage(String resourceId, String message, Level level, Date timestamp, String nodeName) { super(resourceId, message, level, timestamp, nodeName); } + @Override + public final String getJobType() { + return null; + } + @Override protected String getResourceField() { return TRANSFORM_ID.getPreferredName(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DestConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/DestConfig.java similarity index 96% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DestConfig.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/DestConfig.java index 00d03066b6f..6d54685a402 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DestConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/DestConfig.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.core.dataframe.transforms; +package org.elasticsearch.xpack.core.transform.transforms; import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; @@ -15,7 +15,7 @@ import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.xpack.core.dataframe.utils.ExceptionsHelper; +import org.elasticsearch.xpack.core.transform.utils.ExceptionsHelper; import java.io.IOException; import java.util.Objects; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/NodeAttributes.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/NodeAttributes.java similarity index 98% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/NodeAttributes.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/NodeAttributes.java index 76b40bd2377..1283db2f67f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/NodeAttributes.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/NodeAttributes.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.core.dataframe.transforms; +package org.elasticsearch.xpack.core.transform.transforms; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.ParseField; @@ -15,7 +15,7 @@ import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.xpack.core.dataframe.utils.ExceptionsHelper; +import org.elasticsearch.xpack.core.transform.utils.ExceptionsHelper; import java.io.IOException; import java.util.Collections; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/QueryConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/QueryConfig.java similarity index 95% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/QueryConfig.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/QueryConfig.java index 981a56e639d..e65867c03e4 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/QueryConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/QueryConfig.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.core.dataframe.transforms; +package org.elasticsearch.xpack.core.transform.transforms; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -23,7 +23,7 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.query.AbstractQueryBuilder; import org.elasticsearch.index.query.MatchAllQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; -import org.elasticsearch.xpack.core.dataframe.DataFrameMessages; +import org.elasticsearch.xpack.core.transform.TransformMessages; import java.io.IOException; import java.util.Collections; @@ -85,7 +85,7 @@ public class QueryConfig extends AbstractDiffable implements Writea query = AbstractQueryBuilder.parseInnerQueryBuilder(sourceParser); } catch (Exception e) { if (lenient) { - logger.warn(DataFrameMessages.LOG_DATA_FRAME_TRANSFORM_CONFIGURATION_BAD_QUERY, e); + logger.warn(TransformMessages.LOG_TRANSFORM_CONFIGURATION_BAD_QUERY, e); } else { throw e; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/SourceConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/SourceConfig.java similarity index 97% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/SourceConfig.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/SourceConfig.java index 19671fd552c..b59a7912cf7 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/SourceConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/SourceConfig.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.core.dataframe.transforms; +package org.elasticsearch.xpack.core.transform.transforms; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; @@ -15,7 +15,7 @@ import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.xpack.core.dataframe.utils.ExceptionsHelper; +import org.elasticsearch.xpack.core.transform.utils.ExceptionsHelper; import java.io.IOException; import java.util.Arrays; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/SyncConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/SyncConfig.java similarity index 69% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/SyncConfig.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/SyncConfig.java index 19ff79ea7e0..44452426d40 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/SyncConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/SyncConfig.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.core.dataframe.transforms; +package org.elasticsearch.xpack.core.transform.transforms; import org.elasticsearch.common.io.stream.NamedWriteable; import org.elasticsearch.common.xcontent.ToXContentObject; @@ -19,7 +19,7 @@ public interface SyncConfig extends ToXContentObject, NamedWriteable { */ boolean isValid(); - QueryBuilder getRangeQuery(DataFrameTransformCheckpoint newCheckpoint); + QueryBuilder getRangeQuery(TransformCheckpoint newCheckpoint); - QueryBuilder getRangeQuery(DataFrameTransformCheckpoint oldCheckpoint, DataFrameTransformCheckpoint newCheckpoint); + QueryBuilder getRangeQuery(TransformCheckpoint oldCheckpoint, TransformCheckpoint newCheckpoint); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/TimeSyncConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TimeSyncConfig.java similarity index 85% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/TimeSyncConfig.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TimeSyncConfig.java index 607d48b741f..d659a1f3905 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/TimeSyncConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TimeSyncConfig.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.core.dataframe.transforms; +package org.elasticsearch.xpack.core.transform.transforms; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; @@ -16,8 +16,8 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.RangeQueryBuilder; -import org.elasticsearch.xpack.core.dataframe.DataFrameField; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; +import org.elasticsearch.xpack.core.transform.TransformField; import java.io.IOException; import java.util.Objects; @@ -43,10 +43,10 @@ public class TimeSyncConfig implements SyncConfig { TimeValue delay = (TimeValue) args[1]; return new TimeSyncConfig(field, delay); }); - parser.declareString(constructorArg(), DataFrameField.FIELD); + parser.declareString(constructorArg(), TransformField.FIELD); parser.declareField(optionalConstructorArg(), - (p, c) -> TimeValue.parseTimeValue(p.text(), DEFAULT_DELAY, DataFrameField.DELAY.getPreferredName()), - DataFrameField.DELAY, + (p, c) -> TimeValue.parseTimeValue(p.text(), DEFAULT_DELAY, TransformField.DELAY.getPreferredName()), + TransformField.DELAY, ObjectParser.ValueType.STRING); return parser; } @@ -56,7 +56,7 @@ public class TimeSyncConfig implements SyncConfig { } public TimeSyncConfig(final String field, final TimeValue delay) { - this.field = ExceptionsHelper.requireNonNull(field, DataFrameField.FIELD.getPreferredName()); + this.field = ExceptionsHelper.requireNonNull(field, TransformField.FIELD.getPreferredName()); this.delay = delay == null ? DEFAULT_DELAY : delay; } @@ -87,8 +87,8 @@ public class TimeSyncConfig implements SyncConfig { @Override public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { builder.startObject(); - builder.field(DataFrameField.FIELD.getPreferredName(), field); - builder.field(DataFrameField.DELAY.getPreferredName(), delay.getStringRep()); + builder.field(TransformField.FIELD.getPreferredName(), field); + builder.field(TransformField.DELAY.getPreferredName(), delay.getStringRep()); builder.endObject(); return builder; } @@ -129,16 +129,16 @@ public class TimeSyncConfig implements SyncConfig { @Override public String getWriteableName() { - return DataFrameField.TIME_BASED_SYNC.getPreferredName(); + return TransformField.TIME_BASED_SYNC.getPreferredName(); } @Override - public QueryBuilder getRangeQuery(DataFrameTransformCheckpoint newCheckpoint) { + public QueryBuilder getRangeQuery(TransformCheckpoint newCheckpoint) { return new RangeQueryBuilder(field).lt(newCheckpoint.getTimeUpperBound()).format("epoch_millis"); } @Override - public QueryBuilder getRangeQuery(DataFrameTransformCheckpoint oldCheckpoint, DataFrameTransformCheckpoint newCheckpoint) { + public QueryBuilder getRangeQuery(TransformCheckpoint oldCheckpoint, TransformCheckpoint newCheckpoint) { return new RangeQueryBuilder(field).gte(oldCheckpoint.getTimeUpperBound()).lt(newCheckpoint.getTimeUpperBound()) .format("epoch_millis"); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformCheckpoint.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformCheckpoint.java similarity index 83% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformCheckpoint.java rename to 
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformCheckpoint.java index 69877c4cbe7..06586b60e1c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformCheckpoint.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformCheckpoint.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.core.dataframe.transforms; +package org.elasticsearch.xpack.core.transform.transforms; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.ParseField; @@ -16,7 +16,7 @@ import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.xpack.core.dataframe.DataFrameField; +import org.elasticsearch.xpack.core.transform.TransformField; import java.io.IOException; import java.util.Arrays; @@ -30,7 +30,7 @@ import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constru import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; /** - * Checkpoint document to store the checkpoint of a data frame transform + * Checkpoint document to store the checkpoint of a transform * * The fields: * @@ -40,9 +40,9 @@ import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optiona * time_upper_bound for time-based indices this holds the upper time boundary of this checkpoint * */ -public class DataFrameTransformCheckpoint implements Writeable, ToXContentObject { +public class TransformCheckpoint implements Writeable, ToXContentObject { - public static DataFrameTransformCheckpoint EMPTY = new DataFrameTransformCheckpoint("empty", 0L, -1L, Collections.emptyMap(), 0L); + public static TransformCheckpoint EMPTY = new TransformCheckpoint("empty", 0L, -1L, Collections.emptyMap(), 0L); // the own checkpoint public static final ParseField CHECKPOINT = new ParseField("checkpoint"); @@ -52,8 +52,8 @@ public class DataFrameTransformCheckpoint implements Writeable, ToXContentObject private static final String NAME = "data_frame_transform_checkpoint"; - private static final ConstructingObjectParser STRICT_PARSER = createParser(false); - private static final ConstructingObjectParser LENIENT_PARSER = createParser(true); + private static final ConstructingObjectParser STRICT_PARSER = createParser(false); + private static final ConstructingObjectParser LENIENT_PARSER = createParser(true); private final String transformId; private final long timestampMillis; @@ -61,8 +61,8 @@ public class DataFrameTransformCheckpoint implements Writeable, ToXContentObject private final Map indicesCheckpoints; private final long timeUpperBoundMillis; - private static ConstructingObjectParser createParser(boolean lenient) { - ConstructingObjectParser parser = new ConstructingObjectParser<>(NAME, + private static ConstructingObjectParser createParser(boolean lenient) { + ConstructingObjectParser parser = new ConstructingObjectParser<>(NAME, lenient, args -> { String id = (String) args[0]; long timestamp = (Long) args[1]; @@ -74,13 +74,13 @@ public class DataFrameTransformCheckpoint implements Writeable, ToXContentObject Long timeUpperBound = (Long) args[4]; // ignored, only for internal storage: String docType = (String) args[5]; - return new 
DataFrameTransformCheckpoint(id, timestamp, checkpoint, checkpoints, timeUpperBound); + return new TransformCheckpoint(id, timestamp, checkpoint, checkpoints, timeUpperBound); }); - parser.declareString(constructorArg(), DataFrameField.ID); + parser.declareString(constructorArg(), TransformField.ID); // note: this is never parsed from the outside where timestamp can be formatted as date time - parser.declareLong(constructorArg(), DataFrameField.TIMESTAMP_MILLIS); + parser.declareLong(constructorArg(), TransformField.TIMESTAMP_MILLIS); parser.declareLong(constructorArg(), CHECKPOINT); parser.declareObject(constructorArg(), (p,c) -> { @@ -102,13 +102,13 @@ public class DataFrameTransformCheckpoint implements Writeable, ToXContentObject } return checkPointsByIndexName; }, INDICES); - parser.declareLong(optionalConstructorArg(), DataFrameField.TIME_UPPER_BOUND_MILLIS); - parser.declareString(optionalConstructorArg(), DataFrameField.INDEX_DOC_TYPE); + parser.declareLong(optionalConstructorArg(), TransformField.TIME_UPPER_BOUND_MILLIS); + parser.declareString(optionalConstructorArg(), TransformField.INDEX_DOC_TYPE); return parser; } - public DataFrameTransformCheckpoint(String transformId, long timestamp, long checkpoint, Map checkpoints, + public TransformCheckpoint(String transformId, long timestamp, long checkpoint, Map checkpoints, Long timeUpperBound) { this.transformId = Objects.requireNonNull(transformId); this.timestampMillis = timestamp; @@ -117,7 +117,7 @@ public class DataFrameTransformCheckpoint implements Writeable, ToXContentObject this.timeUpperBoundMillis = timeUpperBound == null ? 0 : timeUpperBound; } - public DataFrameTransformCheckpoint(StreamInput in) throws IOException { + public TransformCheckpoint(StreamInput in) throws IOException { this.transformId = in.readString(); this.timestampMillis = in.readLong(); this.checkpoint = in.readLong(); @@ -151,19 +151,19 @@ public class DataFrameTransformCheckpoint implements Writeable, ToXContentObject public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - builder.field(DataFrameField.ID.getPreferredName(), transformId); + builder.field(TransformField.ID.getPreferredName(), transformId); builder.field(CHECKPOINT.getPreferredName(), checkpoint); - builder.field(DataFrameField.INDEX_DOC_TYPE.getPreferredName(), NAME); + builder.field(TransformField.INDEX_DOC_TYPE.getPreferredName(), NAME); builder.startObject(INDICES.getPreferredName()); for (Entry entry : indicesCheckpoints.entrySet()) { builder.array(entry.getKey(), entry.getValue()); } builder.endObject(); - builder.field(DataFrameField.TIMESTAMP_MILLIS.getPreferredName(), timestampMillis); + builder.field(TransformField.TIMESTAMP_MILLIS.getPreferredName(), timestampMillis); if (timeUpperBoundMillis > 0) { - builder.field(DataFrameField.TIME_UPPER_BOUND_MILLIS.getPreferredName(), timeUpperBoundMillis); + builder.field(TransformField.TIME_UPPER_BOUND_MILLIS.getPreferredName(), timeUpperBoundMillis); } builder.endObject(); @@ -209,7 +209,7 @@ public class DataFrameTransformCheckpoint implements Writeable, ToXContentObject return false; } - final DataFrameTransformCheckpoint that = (DataFrameTransformCheckpoint) other; + final TransformCheckpoint that = (TransformCheckpoint) other; // compare the timestamp, id, checkpoint and than call matches for the rest return this.timestampMillis == that.timestampMillis && this.checkpoint == that.checkpoint @@ -219,12 +219,12 @@ public class DataFrameTransformCheckpoint implements 
Writeable, ToXContentObject /** * Compares 2 checkpoints ignoring some inner fields. * - * This is for comparing 2 checkpoints to check whether the data frame transform requires an update + * This is for comparing 2 checkpoints to check whether the transform requires an update * * @param that other checkpoint * @return true if checkpoints match */ - public boolean matches (DataFrameTransformCheckpoint that) { + public boolean matches (TransformCheckpoint that) { if (this == that) { return true; } @@ -246,7 +246,7 @@ public class DataFrameTransformCheckpoint implements Writeable, ToXContentObject return hash; } - public static DataFrameTransformCheckpoint fromXContent(final XContentParser parser, boolean lenient) throws IOException { + public static TransformCheckpoint fromXContent(final XContentParser parser, boolean lenient) throws IOException { return lenient ? LENIENT_PARSER.apply(parser, null) : STRICT_PARSER.apply(parser, null); } @@ -270,7 +270,7 @@ public class DataFrameTransformCheckpoint implements Writeable, ToXContentObject * * @return count number of operations the checkpoint is behind or -1L if it could not calculate the difference */ - public static long getBehind(DataFrameTransformCheckpoint oldCheckpoint, DataFrameTransformCheckpoint newCheckpoint) { + public static long getBehind(TransformCheckpoint oldCheckpoint, TransformCheckpoint newCheckpoint) { if (oldCheckpoint.isTransient()) { if (newCheckpoint.isTransient() == false) { throw new IllegalArgumentException("can not compare transient against a non transient checkpoint"); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformCheckpointStats.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformCheckpointStats.java similarity index 63% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformCheckpointStats.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformCheckpointStats.java index 5a19bfd3000..38238f3c7c0 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformCheckpointStats.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformCheckpointStats.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.core.dataframe.transforms; +package org.elasticsearch.xpack.core.transform.transforms; import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; @@ -14,7 +14,7 @@ import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.xpack.core.dataframe.DataFrameField; +import org.elasticsearch.xpack.core.transform.TransformField; import java.io.IOException; import java.util.Objects; @@ -26,37 +26,37 @@ import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optiona * * This is the user-facing side of DataFrameTransformCheckpoint, containing only the stats to be exposed. 
*/ -public class DataFrameTransformCheckpointStats implements Writeable, ToXContentObject { +public class TransformCheckpointStats implements Writeable, ToXContentObject { - public static final DataFrameTransformCheckpointStats EMPTY = new DataFrameTransformCheckpointStats(0L, null, null, 0L, 0L); + public static final TransformCheckpointStats EMPTY = new TransformCheckpointStats(0L, null, null, 0L, 0L); private final long checkpoint; - private final DataFrameIndexerPosition position; - private final DataFrameTransformProgress checkpointProgress; + private final TransformIndexerPosition position; + private final TransformProgress checkpointProgress; private final long timestampMillis; private final long timeUpperBoundMillis; - static final ConstructingObjectParser LENIENT_PARSER = new ConstructingObjectParser<>( + static final ConstructingObjectParser LENIENT_PARSER = new ConstructingObjectParser<>( "data_frame_transform_checkpoint_stats", true, args -> { long checkpoint = args[0] == null ? 0L : (Long) args[0]; - DataFrameIndexerPosition position = (DataFrameIndexerPosition) args[1]; - DataFrameTransformProgress checkpointProgress = (DataFrameTransformProgress) args[2]; + TransformIndexerPosition position = (TransformIndexerPosition) args[1]; + TransformProgress checkpointProgress = (TransformProgress) args[2]; long timestamp = args[3] == null ? 0L : (Long) args[3]; long timeUpperBound = args[4] == null ? 0L : (Long) args[4]; - return new DataFrameTransformCheckpointStats(checkpoint, position, checkpointProgress, timestamp, timeUpperBound); + return new TransformCheckpointStats(checkpoint, position, checkpointProgress, timestamp, timeUpperBound); }); static { - LENIENT_PARSER.declareLong(optionalConstructorArg(), DataFrameField.CHECKPOINT); - LENIENT_PARSER.declareObject(optionalConstructorArg(), DataFrameIndexerPosition.PARSER, DataFrameField.POSITION); - LENIENT_PARSER.declareObject(optionalConstructorArg(), DataFrameTransformProgress.PARSER, DataFrameField.CHECKPOINT_PROGRESS); - LENIENT_PARSER.declareLong(optionalConstructorArg(), DataFrameField.TIMESTAMP_MILLIS); - LENIENT_PARSER.declareLong(optionalConstructorArg(), DataFrameField.TIME_UPPER_BOUND_MILLIS); + LENIENT_PARSER.declareLong(optionalConstructorArg(), TransformField.CHECKPOINT); + LENIENT_PARSER.declareObject(optionalConstructorArg(), TransformIndexerPosition.PARSER, TransformField.POSITION); + LENIENT_PARSER.declareObject(optionalConstructorArg(), TransformProgress.PARSER, TransformField.CHECKPOINT_PROGRESS); + LENIENT_PARSER.declareLong(optionalConstructorArg(), TransformField.TIMESTAMP_MILLIS); + LENIENT_PARSER.declareLong(optionalConstructorArg(), TransformField.TIME_UPPER_BOUND_MILLIS); } - public DataFrameTransformCheckpointStats(final long checkpoint, final DataFrameIndexerPosition position, - final DataFrameTransformProgress checkpointProgress, final long timestampMillis, + public TransformCheckpointStats(final long checkpoint, final TransformIndexerPosition position, + final TransformProgress checkpointProgress, final long timestampMillis, final long timeUpperBoundMillis) { this.checkpoint = checkpoint; this.position = position; @@ -65,16 +65,16 @@ public class DataFrameTransformCheckpointStats implements Writeable, ToXContentO this.timeUpperBoundMillis = timeUpperBoundMillis; } - public DataFrameTransformCheckpointStats(StreamInput in) throws IOException { + public TransformCheckpointStats(StreamInput in) throws IOException { if (in.getVersion().onOrAfter(Version.V_7_4_0)) { this.checkpoint = in.readVLong(); if 
(in.readBoolean()) { - this.position = new DataFrameIndexerPosition(in); + this.position = new TransformIndexerPosition(in); } else { this.position = null; } if (in.readBoolean()) { - this.checkpointProgress = new DataFrameTransformProgress(in); + this.checkpointProgress = new TransformProgress(in); } else { this.checkpointProgress = null; } @@ -91,11 +91,11 @@ public class DataFrameTransformCheckpointStats implements Writeable, ToXContentO return checkpoint; } - public DataFrameIndexerPosition getPosition() { + public TransformIndexerPosition getPosition() { return position; } - public DataFrameTransformProgress getCheckpointProgress() { + public TransformProgress getCheckpointProgress() { return checkpointProgress; } @@ -110,19 +110,19 @@ public class DataFrameTransformCheckpointStats implements Writeable, ToXContentO @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - builder.field(DataFrameField.CHECKPOINT.getPreferredName(), checkpoint); + builder.field(TransformField.CHECKPOINT.getPreferredName(), checkpoint); if (position != null) { - builder.field(DataFrameField.POSITION.getPreferredName(), position); + builder.field(TransformField.POSITION.getPreferredName(), position); } if (checkpointProgress != null) { - builder.field(DataFrameField.CHECKPOINT_PROGRESS.getPreferredName(), checkpointProgress); + builder.field(TransformField.CHECKPOINT_PROGRESS.getPreferredName(), checkpointProgress); } if (timestampMillis > 0) { - builder.timeField(DataFrameField.TIMESTAMP_MILLIS.getPreferredName(), DataFrameField.TIMESTAMP.getPreferredName(), + builder.timeField(TransformField.TIMESTAMP_MILLIS.getPreferredName(), TransformField.TIMESTAMP.getPreferredName(), timestampMillis); } if (timeUpperBoundMillis > 0) { - builder.timeField(DataFrameField.TIME_UPPER_BOUND_MILLIS.getPreferredName(), DataFrameField.TIME_UPPER_BOUND.getPreferredName(), + builder.timeField(TransformField.TIME_UPPER_BOUND_MILLIS.getPreferredName(), TransformField.TIME_UPPER_BOUND.getPreferredName(), timeUpperBoundMillis); } builder.endObject(); @@ -165,7 +165,7 @@ public class DataFrameTransformCheckpointStats implements Writeable, ToXContentO return false; } - DataFrameTransformCheckpointStats that = (DataFrameTransformCheckpointStats) other; + TransformCheckpointStats that = (TransformCheckpointStats) other; return this.checkpoint == that.checkpoint && Objects.equals(this.position, that.position) @@ -174,7 +174,7 @@ public class DataFrameTransformCheckpointStats implements Writeable, ToXContentO && this.timeUpperBoundMillis == that.timeUpperBoundMillis; } - public static DataFrameTransformCheckpointStats fromXContent(XContentParser p) { + public static TransformCheckpointStats fromXContent(XContentParser p) { return LENIENT_PARSER.apply(p, null); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformCheckpointingInfo.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformCheckpointingInfo.java similarity index 74% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformCheckpointingInfo.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformCheckpointingInfo.java index 004f89f977d..b0f665b7b80 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformCheckpointingInfo.java +++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformCheckpointingInfo.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.core.dataframe.transforms; +package org.elasticsearch.xpack.core.transform.transforms; import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; @@ -29,11 +29,11 @@ import java.util.Objects; * - the in progress checkpoint * - the current state of the source */ -public class DataFrameTransformCheckpointingInfo implements Writeable, ToXContentObject { +public class TransformCheckpointingInfo implements Writeable, ToXContentObject { - public static final DataFrameTransformCheckpointingInfo EMPTY = new DataFrameTransformCheckpointingInfo( - DataFrameTransformCheckpointStats.EMPTY, - DataFrameTransformCheckpointStats.EMPTY, + public static final TransformCheckpointingInfo EMPTY = new TransformCheckpointingInfo( + TransformCheckpointStats.EMPTY, + TransformCheckpointStats.EMPTY, 0L, null); @@ -41,30 +41,30 @@ public class DataFrameTransformCheckpointingInfo implements Writeable, ToXConten public static final ParseField NEXT_CHECKPOINT = new ParseField("next"); public static final ParseField OPERATIONS_BEHIND = new ParseField("operations_behind"); public static final ParseField CHANGES_LAST_DETECTED_AT = new ParseField("changes_last_detected_at"); - private final DataFrameTransformCheckpointStats last; - private final DataFrameTransformCheckpointStats next; + private final TransformCheckpointStats last; + private final TransformCheckpointStats next; private final long operationsBehind; private Instant changesLastDetectedAt; - private static final ConstructingObjectParser LENIENT_PARSER = + private static final ConstructingObjectParser LENIENT_PARSER = new ConstructingObjectParser<>( "data_frame_transform_checkpointing_info", true, a -> { long behind = a[2] == null ? 0L : (Long) a[2]; Instant changesLastDetectedAt = (Instant)a[3]; - return new DataFrameTransformCheckpointingInfo( - a[0] == null ? DataFrameTransformCheckpointStats.EMPTY : (DataFrameTransformCheckpointStats) a[0], - a[1] == null ? DataFrameTransformCheckpointStats.EMPTY : (DataFrameTransformCheckpointStats) a[1], + return new TransformCheckpointingInfo( + a[0] == null ? TransformCheckpointStats.EMPTY : (TransformCheckpointStats) a[0], + a[1] == null ? 
TransformCheckpointStats.EMPTY : (TransformCheckpointStats) a[1], behind, changesLastDetectedAt); }); static { LENIENT_PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), - DataFrameTransformCheckpointStats.LENIENT_PARSER::apply, LAST_CHECKPOINT); + TransformCheckpointStats.LENIENT_PARSER::apply, LAST_CHECKPOINT); LENIENT_PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), - DataFrameTransformCheckpointStats.LENIENT_PARSER::apply, NEXT_CHECKPOINT); + TransformCheckpointStats.LENIENT_PARSER::apply, NEXT_CHECKPOINT); LENIENT_PARSER.declareLong(ConstructingObjectParser.optionalConstructorArg(), OPERATIONS_BEHIND); LENIENT_PARSER.declareField(ConstructingObjectParser.optionalConstructorArg(), p -> TimeUtils.parseTimeFieldToInstant(p, CHANGES_LAST_DETECTED_AT.getPreferredName()), @@ -81,8 +81,8 @@ public class DataFrameTransformCheckpointingInfo implements Writeable, ToXConten * @param operationsBehind counter of operations the current checkpoint is behind source * @param changesLastDetectedAt the last time the source indices were checked for changes */ - public DataFrameTransformCheckpointingInfo(DataFrameTransformCheckpointStats last, - DataFrameTransformCheckpointStats next, + public TransformCheckpointingInfo(TransformCheckpointStats last, + TransformCheckpointStats next, long operationsBehind, Instant changesLastDetectedAt) { this.last = Objects.requireNonNull(last); @@ -91,26 +91,26 @@ public class DataFrameTransformCheckpointingInfo implements Writeable, ToXConten this.changesLastDetectedAt = changesLastDetectedAt == null ? null : Instant.ofEpochMilli(changesLastDetectedAt.toEpochMilli()); } - public DataFrameTransformCheckpointingInfo(DataFrameTransformCheckpointStats last, - DataFrameTransformCheckpointStats next, + public TransformCheckpointingInfo(TransformCheckpointStats last, + TransformCheckpointStats next, long operationsBehind) { this(last, next, operationsBehind, null); } - public DataFrameTransformCheckpointingInfo(StreamInput in) throws IOException { - last = new DataFrameTransformCheckpointStats(in); - next = new DataFrameTransformCheckpointStats(in); + public TransformCheckpointingInfo(StreamInput in) throws IOException { + last = new TransformCheckpointStats(in); + next = new TransformCheckpointStats(in); operationsBehind = in.readLong(); if (in.getVersion().onOrAfter(Version.V_7_4_0)) { changesLastDetectedAt = in.readOptionalInstant(); } } - public DataFrameTransformCheckpointStats getLast() { + public TransformCheckpointStats getLast() { return last; } - public DataFrameTransformCheckpointStats getNext() { + public TransformCheckpointStats getNext() { return next; } @@ -122,7 +122,7 @@ public class DataFrameTransformCheckpointingInfo implements Writeable, ToXConten return changesLastDetectedAt; } - public DataFrameTransformCheckpointingInfo setChangesLastDetectedAt(Instant changesLastDetectedAt) { + public TransformCheckpointingInfo setChangesLastDetectedAt(Instant changesLastDetectedAt) { this.changesLastDetectedAt = Instant.ofEpochMilli(Objects.requireNonNull(changesLastDetectedAt).toEpochMilli()); return this; } @@ -154,7 +154,7 @@ public class DataFrameTransformCheckpointingInfo implements Writeable, ToXConten } } - public static DataFrameTransformCheckpointingInfo fromXContent(XContentParser p) { + public static TransformCheckpointingInfo fromXContent(XContentParser p) { return LENIENT_PARSER.apply(p, null); } @@ -173,7 +173,7 @@ public class DataFrameTransformCheckpointingInfo implements Writeable, ToXConten return false; } - 
DataFrameTransformCheckpointingInfo that = (DataFrameTransformCheckpointingInfo) other; + TransformCheckpointingInfo that = (TransformCheckpointingInfo) other; return Objects.equals(this.last, that.last) && Objects.equals(this.next, that.next) && diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformConfig.java similarity index 80% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformConfig.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformConfig.java index 62865f5e1e5..1d2294e53b9 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformConfig.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.core.dataframe.transforms; +package org.elasticsearch.xpack.core.transform.transforms; import org.elasticsearch.Version; import org.elasticsearch.cluster.AbstractDiffable; @@ -22,10 +22,10 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentParserUtils; import org.elasticsearch.xpack.core.common.time.TimeUtils; -import org.elasticsearch.xpack.core.dataframe.DataFrameField; -import org.elasticsearch.xpack.core.dataframe.DataFrameMessages; -import org.elasticsearch.xpack.core.dataframe.transforms.pivot.PivotConfig; -import org.elasticsearch.xpack.core.dataframe.utils.ExceptionsHelper; +import org.elasticsearch.xpack.core.transform.TransformField; +import org.elasticsearch.xpack.core.transform.TransformMessages; +import org.elasticsearch.xpack.core.transform.transforms.pivot.PivotConfig; +import org.elasticsearch.xpack.core.transform.utils.ExceptionsHelper; import java.io.IOException; import java.time.Instant; @@ -39,7 +39,7 @@ import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optiona /** * This class holds the configuration details of a data frame transform */ -public class DataFrameTransformConfig extends AbstractDiffable implements Writeable, ToXContentObject { +public class TransformConfig extends AbstractDiffable implements Writeable, ToXContentObject { public static final String NAME = "data_frame_transform_config"; public static final ParseField HEADERS = new ParseField("headers"); @@ -47,8 +47,8 @@ public class DataFrameTransformConfig extends AbstractDiffable STRICT_PARSER = createParser(false); - private static final ConstructingObjectParser LENIENT_PARSER = createParser(true); + private static final ConstructingObjectParser STRICT_PARSER = createParser(false); + private static final ConstructingObjectParser LENIENT_PARSER = createParser(true); static final int MAX_DESCRIPTION_LENGTH = 1_000; private final String id; @@ -71,8 +71,8 @@ public class DataFrameTransformConfig extends AbstractDiffable createParser(boolean lenient) { - ConstructingObjectParser parser = new ConstructingObjectParser<>(NAME, lenient, + private static ConstructingObjectParser createParser(boolean lenient) { + ConstructingObjectParser parser = new ConstructingObjectParser<>(NAME, lenient, (args, optionalId) -> { String id = (String) args[0]; @@ -81,14 +81,14 @@ 
public class DataFrameTransformConfig extends AbstractDiffable SourceConfig.fromXContent(p, lenient), DataFrameField.SOURCE); - parser.declareObject(constructorArg(), (p, c) -> DestConfig.fromXContent(p, lenient), DataFrameField.DESTINATION); - parser.declareString(optionalConstructorArg(), DataFrameField.FREQUENCY); + parser.declareString(optionalConstructorArg(), TransformField.ID); + parser.declareObject(constructorArg(), (p, c) -> SourceConfig.fromXContent(p, lenient), TransformField.SOURCE); + parser.declareObject(constructorArg(), (p, c) -> DestConfig.fromXContent(p, lenient), TransformField.DESTINATION); + parser.declareString(optionalConstructorArg(), TransformField.FREQUENCY); - parser.declareObject(optionalConstructorArg(), (p, c) -> parseSyncConfig(p, lenient), DataFrameField.SYNC); + parser.declareObject(optionalConstructorArg(), (p, c) -> parseSyncConfig(p, lenient), TransformField.SYNC); - parser.declareString(optionalConstructorArg(), DataFrameField.INDEX_DOC_TYPE); + parser.declareString(optionalConstructorArg(), TransformField.INDEX_DOC_TYPE); parser.declareObject(optionalConstructorArg(), (p, c) -> p.mapStrings(), HEADERS); parser.declareObject(optionalConstructorArg(), (p, c) -> PivotConfig.fromXContent(p, lenient), PIVOT_TRANSFORM); - parser.declareString(optionalConstructorArg(), DataFrameField.DESCRIPTION); + parser.declareString(optionalConstructorArg(), TransformField.DESCRIPTION); parser.declareField(optionalConstructorArg(), - p -> TimeUtils.parseTimeFieldToInstant(p, DataFrameField.CREATE_TIME.getPreferredName()), DataFrameField.CREATE_TIME, + p -> TimeUtils.parseTimeFieldToInstant(p, TransformField.CREATE_TIME.getPreferredName()), TransformField.CREATE_TIME, ObjectParser.ValueType.VALUE); - parser.declareString(optionalConstructorArg(), DataFrameField.VERSION); + parser.declareString(optionalConstructorArg(), TransformField.VERSION); return parser; } @@ -148,7 +148,7 @@ public class DataFrameTransformConfig extends AbstractDiffable MAX_DESCRIPTION_LENGTH) { throw new IllegalArgumentException("[description] must be less than 1000 characters in length."); @@ -178,7 +178,7 @@ public class DataFrameTransformConfig extends AbstractDiffable headers) { + public TransformConfig setHeaders(Map headers) { this.headers = headers; return this; } @@ -245,7 +245,7 @@ public class DataFrameTransformConfig extends AbstractDiffable PARSER = new ConstructingObjectParser<>(NAME, + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(NAME, false, (args) -> { SourceConfig source = (SourceConfig) args[0]; DestConfig dest = (DestConfig) args[1]; TimeValue frequency = args[2] == null ? 
null : - TimeValue.parseTimeValue((String) args[2], DataFrameField.FREQUENCY.getPreferredName()); + TimeValue.parseTimeValue((String) args[2], TransformField.FREQUENCY.getPreferredName()); SyncConfig syncConfig = (SyncConfig) args[3]; String description = (String) args[4]; - return new DataFrameTransformConfigUpdate(source, dest, frequency, syncConfig, description); + return new TransformConfigUpdate(source, dest, frequency, syncConfig, description); }); static { - PARSER.declareObject(optionalConstructorArg(), (p, c) -> SourceConfig.fromXContent(p, false), DataFrameField.SOURCE); - PARSER.declareObject(optionalConstructorArg(), (p, c) -> DestConfig.fromXContent(p, false), DataFrameField.DESTINATION); - PARSER.declareString(optionalConstructorArg(), DataFrameField.FREQUENCY); - PARSER.declareObject(optionalConstructorArg(), (p, c) -> parseSyncConfig(p), DataFrameField.SYNC); - PARSER.declareString(optionalConstructorArg(), DataFrameField.DESCRIPTION); + PARSER.declareObject(optionalConstructorArg(), (p, c) -> SourceConfig.fromXContent(p, false), TransformField.SOURCE); + PARSER.declareObject(optionalConstructorArg(), (p, c) -> DestConfig.fromXContent(p, false), TransformField.DESTINATION); + PARSER.declareString(optionalConstructorArg(), TransformField.FREQUENCY); + PARSER.declareObject(optionalConstructorArg(), (p, c) -> parseSyncConfig(p), TransformField.SYNC); + PARSER.declareString(optionalConstructorArg(), TransformField.DESCRIPTION); } private static SyncConfig parseSyncConfig(XContentParser parser) throws IOException { @@ -73,7 +73,7 @@ public class DataFrameTransformConfigUpdate implements Writeable, ToXContentObje private final String description; private Map headers; - public DataFrameTransformConfigUpdate(final SourceConfig source, + public TransformConfigUpdate(final SourceConfig source, final DestConfig dest, final TimeValue frequency, final SyncConfig syncConfig, @@ -88,7 +88,7 @@ public class DataFrameTransformConfigUpdate implements Writeable, ToXContentObje } } - public DataFrameTransformConfigUpdate(final StreamInput in) throws IOException { + public TransformConfigUpdate(final StreamInput in) throws IOException { source = in.readOptionalWriteable(SourceConfig::new); dest = in.readOptionalWriteable(DestConfig::new); frequency = in.readOptionalTimeValue(); @@ -147,24 +147,24 @@ public class DataFrameTransformConfigUpdate implements Writeable, ToXContentObje public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { builder.startObject(); if (source != null) { - builder.field(DataFrameField.SOURCE.getPreferredName(), source); + builder.field(TransformField.SOURCE.getPreferredName(), source); } if (dest != null) { - builder.field(DataFrameField.DESTINATION.getPreferredName(), dest); + builder.field(TransformField.DESTINATION.getPreferredName(), dest); } if (frequency != null) { - builder.field(DataFrameField.FREQUENCY.getPreferredName(), frequency.getStringRep()); + builder.field(TransformField.FREQUENCY.getPreferredName(), frequency.getStringRep()); } if (syncConfig != null) { - builder.startObject(DataFrameField.SYNC.getPreferredName()); + builder.startObject(TransformField.SYNC.getPreferredName()); builder.field(syncConfig.getWriteableName(), syncConfig); builder.endObject(); } if (description != null) { - builder.field(DataFrameField.DESCRIPTION.getPreferredName(), description); + builder.field(TransformField.DESCRIPTION.getPreferredName(), description); } if (headers != null) { - 
builder.field(DataFrameTransformConfig.HEADERS.getPreferredName(), headers); + builder.field(TransformConfig.HEADERS.getPreferredName(), headers); } builder.endObject(); return builder; @@ -180,7 +180,7 @@ public class DataFrameTransformConfigUpdate implements Writeable, ToXContentObje return false; } - final DataFrameTransformConfigUpdate that = (DataFrameTransformConfigUpdate) other; + final TransformConfigUpdate that = (TransformConfigUpdate) other; return Objects.equals(this.source, that.source) && Objects.equals(this.dest, that.dest) @@ -200,11 +200,11 @@ public class DataFrameTransformConfigUpdate implements Writeable, ToXContentObje return Strings.toString(this, true, true); } - public static DataFrameTransformConfigUpdate fromXContent(final XContentParser parser) { + public static TransformConfigUpdate fromXContent(final XContentParser parser) { return PARSER.apply(parser, null); } - public boolean isNoop(DataFrameTransformConfig config) { + public boolean isNoop(TransformConfig config) { return isNullOrEqual(source, config.getSource()) && isNullOrEqual(dest, config.getDestination()) && isNullOrEqual(frequency, config.getFrequency()) @@ -217,11 +217,11 @@ public class DataFrameTransformConfigUpdate implements Writeable, ToXContentObje return lft == null || lft.equals(rgt); } - public DataFrameTransformConfig apply(DataFrameTransformConfig config) { + public TransformConfig apply(TransformConfig config) { if (isNoop(config)) { return config; } - DataFrameTransformConfig.Builder builder = new DataFrameTransformConfig.Builder(config); + TransformConfig.Builder builder = new TransformConfig.Builder(config); if (source != null) { builder.setSource(source); } @@ -235,7 +235,7 @@ public class DataFrameTransformConfigUpdate implements Writeable, ToXContentObje String currentConfigName = config.getSyncConfig() == null ? "null" : config.getSyncConfig().getWriteableName(); if (syncConfig.getWriteableName().equals(currentConfigName) == false) { throw new ElasticsearchStatusException( - DataFrameMessages.getMessage(DataFrameMessages.DATA_FRAME_UPDATE_CANNOT_CHANGE_SYNC_METHOD, + TransformMessages.getMessage(TransformMessages.TRANSFORM_UPDATE_CANNOT_CHANGE_SYNC_METHOD, config.getId(), currentConfigName, syncConfig.getWriteableName()), diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameIndexerPosition.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformIndexerPosition.java similarity index 88% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameIndexerPosition.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformIndexerPosition.java index 84d3a655593..392c221f705 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameIndexerPosition.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformIndexerPosition.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.core.dataframe.transforms; +package org.elasticsearch.xpack.core.transform.transforms; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; @@ -24,7 +24,7 @@ import java.util.Objects; import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; -public class DataFrameIndexerPosition implements Writeable, ToXContentObject { +public class TransformIndexerPosition implements Writeable, ToXContentObject { public static final String NAME = "data_frame/indexer_position"; public static final ParseField INDEXER_POSITION = new ParseField("indexer_position"); @@ -34,21 +34,21 @@ public class DataFrameIndexerPosition implements Writeable, ToXContentObject { private final Map bucketPosition; @SuppressWarnings("unchecked") - public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(NAME, + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(NAME, true, - args -> new DataFrameIndexerPosition((Map) args[0],(Map) args[1])); + args -> new TransformIndexerPosition((Map) args[0],(Map) args[1])); static { PARSER.declareField(optionalConstructorArg(), XContentParser::mapOrdered, INDEXER_POSITION, ValueType.OBJECT); PARSER.declareField(optionalConstructorArg(), XContentParser::mapOrdered, BUCKET_POSITION, ValueType.OBJECT); } - public DataFrameIndexerPosition(Map indexerPosition, Map bucketPosition) { + public TransformIndexerPosition(Map indexerPosition, Map bucketPosition) { this.indexerPosition = indexerPosition == null ? null : Collections.unmodifiableMap(indexerPosition); this.bucketPosition = bucketPosition == null ? null : Collections.unmodifiableMap(bucketPosition); } - public DataFrameIndexerPosition(StreamInput in) throws IOException { + public TransformIndexerPosition(StreamInput in) throws IOException { Map position = in.readMap(); indexerPosition = position == null ? 
null : Collections.unmodifiableMap(position); position = in.readMap(); @@ -92,7 +92,7 @@ public class DataFrameIndexerPosition implements Writeable, ToXContentObject { return false; } - DataFrameIndexerPosition that = (DataFrameIndexerPosition) other; + TransformIndexerPosition that = (TransformIndexerPosition) other; return Objects.equals(this.indexerPosition, that.indexerPosition) && Objects.equals(this.bucketPosition, that.bucketPosition); @@ -108,7 +108,7 @@ public class DataFrameIndexerPosition implements Writeable, ToXContentObject { return Strings.toString(this); } - public static DataFrameIndexerPosition fromXContent(XContentParser parser) { + public static TransformIndexerPosition fromXContent(XContentParser parser) { try { return PARSER.parse(parser, null); } catch (IOException e) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameIndexerTransformStats.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformIndexerStats.java similarity index 92% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameIndexerTransformStats.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformIndexerStats.java index 67d8ab5f6ce..7a513bc4d5f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameIndexerTransformStats.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformIndexerStats.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.core.dataframe.transforms; +package org.elasticsearch.xpack.core.transform.transforms; import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; @@ -21,7 +21,7 @@ import java.util.Objects; import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; -public class DataFrameIndexerTransformStats extends IndexerJobStats { +public class TransformIndexerStats extends IndexerJobStats { private static final String DEFAULT_TRANSFORM_ID = "_all"; // TODO remove when no longer needed for wire BWC @@ -48,9 +48,9 @@ public class DataFrameIndexerTransformStats extends IndexerJobStats { private static final int EXP_AVG_WINDOW = 10; private static final double ALPHA = 2.0/(EXP_AVG_WINDOW + 1); - private static final ConstructingObjectParser LENIENT_PARSER = new ConstructingObjectParser<>( + private static final ConstructingObjectParser LENIENT_PARSER = new ConstructingObjectParser<>( NAME, true, - args -> new DataFrameIndexerTransformStats( + args -> new TransformIndexerStats( (long) args[0], (long) args[1], (long) args[2], (long) args[3], (long) args[4], (long) args[5], (long) args[6], (long) args[7], (long) args[8], (long) args[9], (Double) args[10], (Double) args[11], (Double) args[12])); @@ -76,11 +76,11 @@ public class DataFrameIndexerTransformStats extends IndexerJobStats { /** * Create with all stats set to zero */ - public DataFrameIndexerTransformStats() { + public TransformIndexerStats() { super(); } - public DataFrameIndexerTransformStats(long numPages, long numInputDocuments, long numOutputDocuments, + public TransformIndexerStats(long numPages, long numInputDocuments, long numOutputDocuments, long numInvocations, long indexTime, long searchTime, long 
indexTotal, long searchTotal, long indexFailures, long searchFailures, Double expAvgCheckpointDurationMs, Double expAvgDocumentsIndexed, Double expAvgDocumentsProcessed ) { @@ -91,14 +91,14 @@ public class DataFrameIndexerTransformStats extends IndexerJobStats { this.expAvgDocumentsProcessed = expAvgDocumentsProcessed == null ? 0.0 : expAvgDocumentsProcessed; } - public DataFrameIndexerTransformStats(long numPages, long numInputDocuments, long numOutputDocuments, + public TransformIndexerStats(long numPages, long numInputDocuments, long numOutputDocuments, long numInvocations, long indexTime, long searchTime, long indexTotal, long searchTotal, long indexFailures, long searchFailures) { this(numPages, numInputDocuments, numOutputDocuments, numInvocations, indexTime, searchTime, indexTotal, searchTotal, indexFailures, searchFailures, 0.0, 0.0, 0.0); } - public DataFrameIndexerTransformStats(DataFrameIndexerTransformStats other) { + public TransformIndexerStats(TransformIndexerStats other) { this(other.numPages, other.numInputDocuments, other.numOuputDocuments, other.numInvocations, other.indexTime, other.searchTime, other.indexTotal, other.searchTotal, other.indexFailures, other.searchFailures); this.expAvgCheckpointDurationMs = other.expAvgCheckpointDurationMs; @@ -106,7 +106,7 @@ public class DataFrameIndexerTransformStats extends IndexerJobStats { this.expAvgDocumentsProcessed = other.expAvgDocumentsProcessed; } - public DataFrameIndexerTransformStats(StreamInput in) throws IOException { + public TransformIndexerStats(StreamInput in) throws IOException { super(in); if (in.getVersion().before(Version.V_7_4_0)) { in.readString(); // was transformId @@ -193,7 +193,7 @@ public class DataFrameIndexerTransformStats extends IndexerJobStats { return false; } - DataFrameIndexerTransformStats that = (DataFrameIndexerTransformStats) other; + TransformIndexerStats that = (TransformIndexerStats) other; return Objects.equals(this.numPages, that.numPages) && Objects.equals(this.numInputDocuments, that.numInputDocuments) @@ -217,7 +217,7 @@ public class DataFrameIndexerTransformStats extends IndexerJobStats { expAvgCheckpointDurationMs, expAvgDocumentsIndexed, expAvgDocumentsProcessed); } - public static DataFrameIndexerTransformStats fromXContent(XContentParser parser) { + public static TransformIndexerStats fromXContent(XContentParser parser) { try { return LENIENT_PARSER.parse(parser, null); } catch (IOException e) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformProgress.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformProgress.java similarity index 88% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformProgress.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformProgress.java index d7e409f4a52..a8ee87a7f8d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformProgress.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformProgress.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.core.dataframe.transforms; +package org.elasticsearch.xpack.core.transform.transforms; import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; @@ -21,7 +21,7 @@ import java.util.Objects; import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; -public class DataFrameTransformProgress implements Writeable, ToXContentObject { +public class TransformProgress implements Writeable, ToXContentObject { public static final ParseField TOTAL_DOCS = new ParseField("total_docs"); public static final ParseField DOCS_REMAINING = new ParseField("docs_remaining"); @@ -29,10 +29,10 @@ public class DataFrameTransformProgress implements Writeable, ToXContentObject { public static final ParseField DOCS_INDEXED = new ParseField("docs_indexed"); public static final String PERCENT_COMPLETE = "percent_complete"; - public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( "data_frame_transform_progress", true, - a -> new DataFrameTransformProgress((Long) a[0], (Long)a[1], (Long)a[2], (Long)a[3])); + a -> new TransformProgress((Long) a[0], (Long)a[1], (Long)a[2], (Long)a[3])); static { PARSER.declareLong(optionalConstructorArg(), TOTAL_DOCS); @@ -45,12 +45,12 @@ public class DataFrameTransformProgress implements Writeable, ToXContentObject { private long documentsProcessed; private long documentsIndexed; - public DataFrameTransformProgress() { + public TransformProgress() { this(null, 0L, 0L); } // If we are reading from an old document we need to convert docsRemaining to docsProcessed - public DataFrameTransformProgress(Long totalDocs, Long docsRemaining, Long documentsProcessed, Long documentsIndexed) { + public TransformProgress(Long totalDocs, Long docsRemaining, Long documentsProcessed, Long documentsIndexed) { this(totalDocs, documentsProcessed != null ? documentsProcessed : @@ -58,7 +58,7 @@ public class DataFrameTransformProgress implements Writeable, ToXContentObject { documentsIndexed); } - public DataFrameTransformProgress(Long totalDocs, Long documentsProcessed, Long documentsIndexed) { + public TransformProgress(Long totalDocs, Long documentsProcessed, Long documentsIndexed) { if (totalDocs != null && totalDocs < 0) { throw new IllegalArgumentException("[total_docs] must be >0."); } @@ -73,13 +73,13 @@ public class DataFrameTransformProgress implements Writeable, ToXContentObject { this.documentsIndexed = documentsIndexed == null ? 
0 : documentsIndexed; } - public DataFrameTransformProgress(DataFrameTransformProgress otherProgress) { + public TransformProgress(TransformProgress otherProgress) { this.totalDocs = otherProgress.totalDocs; this.documentsProcessed = otherProgress.documentsProcessed; this.documentsIndexed = otherProgress.documentsIndexed; } - public DataFrameTransformProgress(StreamInput in) throws IOException { + public TransformProgress(StreamInput in) throws IOException { if (in.getVersion().onOrAfter(Version.V_7_4_0)) { this.totalDocs = in.readOptionalLong(); this.documentsProcessed = in.readVLong(); @@ -135,7 +135,7 @@ public class DataFrameTransformProgress implements Writeable, ToXContentObject { return false; } - DataFrameTransformProgress that = (DataFrameTransformProgress) other; + TransformProgress that = (TransformProgress) other; return Objects.equals(this.documentsIndexed, that.documentsIndexed) && Objects.equals(this.totalDocs, that.totalDocs) && Objects.equals(this.documentsProcessed, that.documentsProcessed); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformState.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformState.java similarity index 74% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformState.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformState.java index 6cc058e5acd..29ae1fe3968 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformState.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformState.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.core.dataframe.transforms; +package org.elasticsearch.xpack.core.transform.transforms; import org.elasticsearch.Version; import org.elasticsearch.common.Nullable; @@ -18,8 +18,8 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.persistent.PersistentTaskState; import org.elasticsearch.tasks.Task; -import org.elasticsearch.xpack.core.dataframe.DataFrameField; import org.elasticsearch.xpack.core.indexing.IndexerState; +import org.elasticsearch.xpack.core.transform.TransformField; import java.io.IOException; import java.util.Map; @@ -28,16 +28,16 @@ import java.util.Objects; import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; -public class DataFrameTransformState implements Task.Status, PersistentTaskState { - public static final String NAME = DataFrameField.TASK_NAME; +public class TransformState implements Task.Status, PersistentTaskState { + public static final String NAME = TransformField.TASK_NAME; - private final DataFrameTransformTaskState taskState; + private final TransformTaskState taskState; private final IndexerState indexerState; - private final DataFrameTransformProgress progress; + private final TransformProgress progress; private final long checkpoint; @Nullable - private final DataFrameIndexerPosition position; + private final TransformIndexerPosition position; @Nullable private final String reason; @Nullable @@ -55,44 +55,44 @@ public class DataFrameTransformState implements Task.Status, PersistentTaskState public static final ParseField NODE = new ParseField("node"); @SuppressWarnings("unchecked") - public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(NAME, + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(NAME, true, args -> { - DataFrameTransformTaskState taskState = (DataFrameTransformTaskState) args[0]; + TransformTaskState taskState = (TransformTaskState) args[0]; IndexerState indexerState = (IndexerState) args[1]; Map bwcCurrentPosition = (Map) args[2]; - DataFrameIndexerPosition dataFrameIndexerPosition = (DataFrameIndexerPosition) args[3]; + TransformIndexerPosition transformIndexerPosition = (TransformIndexerPosition) args[3]; // BWC handling, translate current_position to position iff position isn't set - if (bwcCurrentPosition != null && dataFrameIndexerPosition == null) { - dataFrameIndexerPosition = new DataFrameIndexerPosition(bwcCurrentPosition, null); + if (bwcCurrentPosition != null && transformIndexerPosition == null) { + transformIndexerPosition = new TransformIndexerPosition(bwcCurrentPosition, null); } long checkpoint = (long) args[4]; String reason = (String) args[5]; - DataFrameTransformProgress progress = (DataFrameTransformProgress) args[6]; + TransformProgress progress = (TransformProgress) args[6]; NodeAttributes node = (NodeAttributes) args[7]; - return new DataFrameTransformState(taskState, indexerState, dataFrameIndexerPosition, checkpoint, reason, progress, node); + return new TransformState(taskState, indexerState, transformIndexerPosition, checkpoint, reason, progress, node); }); static { - PARSER.declareField(constructorArg(), p -> DataFrameTransformTaskState.fromString(p.text()), TASK_STATE, ValueType.STRING); + PARSER.declareField(constructorArg(), p -> TransformTaskState.fromString(p.text()), TASK_STATE, 
ValueType.STRING); PARSER.declareField(constructorArg(), p -> IndexerState.fromString(p.text()), INDEXER_STATE, ValueType.STRING); PARSER.declareField(optionalConstructorArg(), XContentParser::mapOrdered, CURRENT_POSITION, ValueType.OBJECT); - PARSER.declareField(optionalConstructorArg(), DataFrameIndexerPosition::fromXContent, POSITION, ValueType.OBJECT); + PARSER.declareField(optionalConstructorArg(), TransformIndexerPosition::fromXContent, POSITION, ValueType.OBJECT); PARSER.declareLong(ConstructingObjectParser.optionalConstructorArg(), CHECKPOINT); PARSER.declareString(optionalConstructorArg(), REASON); - PARSER.declareField(optionalConstructorArg(), DataFrameTransformProgress.PARSER::apply, PROGRESS, ValueType.OBJECT); + PARSER.declareField(optionalConstructorArg(), TransformProgress.PARSER::apply, PROGRESS, ValueType.OBJECT); PARSER.declareField(optionalConstructorArg(), NodeAttributes.PARSER::apply, NODE, ValueType.OBJECT); } - public DataFrameTransformState(DataFrameTransformTaskState taskState, + public TransformState(TransformTaskState taskState, IndexerState indexerState, - @Nullable DataFrameIndexerPosition position, + @Nullable TransformIndexerPosition position, long checkpoint, @Nullable String reason, - @Nullable DataFrameTransformProgress progress, + @Nullable TransformProgress progress, @Nullable NodeAttributes node) { this.taskState = taskState; this.indexerState = indexerState; @@ -103,27 +103,27 @@ public class DataFrameTransformState implements Task.Status, PersistentTaskState this.node = node; } - public DataFrameTransformState(DataFrameTransformTaskState taskState, + public TransformState(TransformTaskState taskState, IndexerState indexerState, - @Nullable DataFrameIndexerPosition position, + @Nullable TransformIndexerPosition position, long checkpoint, @Nullable String reason, - @Nullable DataFrameTransformProgress progress) { + @Nullable TransformProgress progress) { this(taskState, indexerState, position, checkpoint, reason, progress, null); } - public DataFrameTransformState(StreamInput in) throws IOException { - taskState = DataFrameTransformTaskState.fromStream(in); + public TransformState(StreamInput in) throws IOException { + taskState = TransformTaskState.fromStream(in); indexerState = IndexerState.fromStream(in); if (in.getVersion().onOrAfter(Version.V_7_3_0)) { - position = in.readOptionalWriteable(DataFrameIndexerPosition::new); + position = in.readOptionalWriteable(TransformIndexerPosition::new); } else { Map pos = in.readMap(); - position = new DataFrameIndexerPosition(pos, null); + position = new TransformIndexerPosition(pos, null); } checkpoint = in.readLong(); reason = in.readOptionalString(); - progress = in.readOptionalWriteable(DataFrameTransformProgress::new); + progress = in.readOptionalWriteable(TransformProgress::new); if (in.getVersion().onOrAfter(Version.V_7_3_0)) { node = in.readOptionalWriteable(NodeAttributes::new); } else { @@ -131,7 +131,7 @@ public class DataFrameTransformState implements Task.Status, PersistentTaskState } } - public DataFrameTransformTaskState getTaskState() { + public TransformTaskState getTaskState() { return taskState; } @@ -139,7 +139,7 @@ public class DataFrameTransformState implements Task.Status, PersistentTaskState return indexerState; } - public DataFrameIndexerPosition getPosition() { + public TransformIndexerPosition getPosition() { return position; } @@ -147,7 +147,7 @@ public class DataFrameTransformState implements Task.Status, PersistentTaskState return checkpoint; } - public DataFrameTransformProgress 
getProgress() { + public TransformProgress getProgress() { return progress; } @@ -159,12 +159,12 @@ public class DataFrameTransformState implements Task.Status, PersistentTaskState return node; } - public DataFrameTransformState setNode(NodeAttributes node) { + public TransformState setNode(NodeAttributes node) { this.node = node; return this; } - public static DataFrameTransformState fromXContent(XContentParser parser) { + public static TransformState fromXContent(XContentParser parser) { try { return PARSER.parse(parser, null); } catch (IOException e) { @@ -226,7 +226,7 @@ public class DataFrameTransformState implements Task.Status, PersistentTaskState return false; } - DataFrameTransformState that = (DataFrameTransformState) other; + TransformState that = (TransformState) other; return Objects.equals(this.taskState, that.taskState) && Objects.equals(this.indexerState, that.indexerState) && diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformStats.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformStats.java similarity index 71% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformStats.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformStats.java index 6b58374925b..c719cf7723d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformStats.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformStats.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.core.dataframe.transforms; +package org.elasticsearch.xpack.core.transform.transforms; import org.elasticsearch.Version; import org.elasticsearch.common.Nullable; @@ -19,8 +19,8 @@ import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.xpack.core.dataframe.DataFrameField; import org.elasticsearch.xpack.core.indexing.IndexerState; +import org.elasticsearch.xpack.core.transform.TransformField; import java.io.IOException; import java.util.Locale; @@ -34,7 +34,7 @@ import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optiona * Objects of this class are expected to be ephemeral. * Do not persist objects of this class to cluster state or an index. 
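 * (Renamed from DataFrameTransformStats: the "data_frame_transform_stats" parser name and the pre-7.4
 * wire fall-backs below are left unchanged, so only the Java class and package names change.)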
*/ -public class DataFrameTransformStats implements Writeable, ToXContentObject { +public class TransformStats implements Writeable, ToXContentObject { public static final String NAME = "data_frame_transform_stats"; public static final ParseField STATE_FIELD = new ParseField("state"); @@ -48,52 +48,52 @@ public class DataFrameTransformStats implements Writeable, ToXContentObject { private final String reason; @Nullable private NodeAttributes node; - private final DataFrameIndexerTransformStats indexerStats; - private final DataFrameTransformCheckpointingInfo checkpointingInfo; + private final TransformIndexerStats indexerStats; + private final TransformCheckpointingInfo checkpointingInfo; - public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( NAME, true, - a -> new DataFrameTransformStats((String) a[0], + a -> new TransformStats((String) a[0], (State) a[1], (String) a[2], (NodeAttributes) a[3], - (DataFrameIndexerTransformStats) a[4], - (DataFrameTransformCheckpointingInfo) a[5])); + (TransformIndexerStats) a[4], + (TransformCheckpointingInfo) a[5])); static { - PARSER.declareString(constructorArg(), DataFrameField.ID); - PARSER.declareField(constructorArg(), p -> DataFrameTransformStats.State.fromString(p.text()), STATE_FIELD, + PARSER.declareString(constructorArg(), TransformField.ID); + PARSER.declareField(constructorArg(), p -> TransformStats.State.fromString(p.text()), STATE_FIELD, ObjectParser.ValueType.STRING); PARSER.declareString(optionalConstructorArg(), REASON_FIELD); PARSER.declareField(optionalConstructorArg(), NodeAttributes.PARSER::apply, NODE_FIELD, ObjectParser.ValueType.OBJECT); - PARSER.declareObject(constructorArg(), (p, c) -> DataFrameIndexerTransformStats.fromXContent(p), - DataFrameField.STATS_FIELD); + PARSER.declareObject(constructorArg(), (p, c) -> TransformIndexerStats.fromXContent(p), + TransformField.STATS_FIELD); PARSER.declareObject(constructorArg(), - (p, c) -> DataFrameTransformCheckpointingInfo.fromXContent(p), CHECKPOINTING_INFO_FIELD); + (p, c) -> TransformCheckpointingInfo.fromXContent(p), CHECKPOINTING_INFO_FIELD); } - public static DataFrameTransformStats fromXContent(XContentParser parser) throws IOException { + public static TransformStats fromXContent(XContentParser parser) throws IOException { return PARSER.parse(parser, null); } - public static DataFrameTransformStats initialStats(String id) { - return stoppedStats(id, new DataFrameIndexerTransformStats()); + public static TransformStats initialStats(String id) { + return stoppedStats(id, new TransformIndexerStats()); } - public static DataFrameTransformStats stoppedStats(String id, DataFrameIndexerTransformStats indexerTransformStats) { - return new DataFrameTransformStats(id, + public static TransformStats stoppedStats(String id, TransformIndexerStats indexerTransformStats) { + return new TransformStats(id, State.STOPPED, null, null, indexerTransformStats, - DataFrameTransformCheckpointingInfo.EMPTY); + TransformCheckpointingInfo.EMPTY); } - public DataFrameTransformStats(String id, State state, @Nullable String reason, - @Nullable NodeAttributes node, DataFrameIndexerTransformStats stats, - DataFrameTransformCheckpointingInfo checkpointingInfo) { + public TransformStats(String id, State state, @Nullable String reason, + @Nullable NodeAttributes node, TransformIndexerStats stats, + TransformCheckpointingInfo checkpointingInfo) { this.id = Objects.requireNonNull(id); 
this.state = Objects.requireNonNull(state); this.reason = reason; @@ -102,7 +102,7 @@ public class DataFrameTransformStats implements Writeable, ToXContentObject { this.checkpointingInfo = Objects.requireNonNull(checkpointingInfo); } - public DataFrameTransformStats(StreamInput in) throws IOException { + public TransformStats(StreamInput in) throws IOException { if (in.getVersion().onOrAfter(Version.V_7_4_0)) { this.id = in.readString(); this.state = in.readEnum(State.class); @@ -112,27 +112,27 @@ public class DataFrameTransformStats implements Writeable, ToXContentObject { } else { this.node = null; } - this.indexerStats = new DataFrameIndexerTransformStats(in); - this.checkpointingInfo = new DataFrameTransformCheckpointingInfo(in); + this.indexerStats = new TransformIndexerStats(in); + this.checkpointingInfo = new TransformCheckpointingInfo(in); } else { // Prior to version 7.4 DataFrameTransformStats didn't exist, and we have // to do the best we can of reading from a DataFrameTransformStoredDoc object // (which is called DataFrameTransformStateAndStats in 7.2/7.3) this.id = in.readString(); - DataFrameTransformState transformState = new DataFrameTransformState(in); + TransformState transformState = new TransformState(in); this.state = State.fromComponents(transformState.getTaskState(), transformState.getIndexerState()); this.reason = transformState.getReason(); this.node = null; - this.indexerStats = new DataFrameIndexerTransformStats(in); - this.checkpointingInfo = new DataFrameTransformCheckpointingInfo(in); + this.indexerStats = new TransformIndexerStats(in); + this.checkpointingInfo = new TransformCheckpointingInfo(in); } } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - builder.field(DataFrameField.ID.getPreferredName(), id); + builder.field(TransformField.ID.getPreferredName(), id); builder.field(STATE_FIELD.getPreferredName(), state.value()); if (reason != null) { builder.field(REASON_FIELD.getPreferredName(), reason); @@ -140,7 +140,7 @@ public class DataFrameTransformStats implements Writeable, ToXContentObject { if (node != null) { builder.field(NODE_FIELD.getPreferredName(), node); } - builder.field(DataFrameField.STATS_FIELD.getPreferredName(), indexerStats, params); + builder.field(TransformField.STATS_FIELD.getPreferredName(), indexerStats, params); builder.field(CHECKPOINTING_INFO_FIELD.getPreferredName(), checkpointingInfo, params); builder.endObject(); return builder; @@ -165,8 +165,8 @@ public class DataFrameTransformStats implements Writeable, ToXContentObject { // to do the best we can of writing to a DataFrameTransformStoredDoc object // (which is called DataFrameTransformStateAndStats in 7.2/7.3) out.writeString(id); - Tuple stateComponents = state.toComponents(); - new DataFrameTransformState(stateComponents.v1(), + Tuple stateComponents = state.toComponents(); + new TransformState(stateComponents.v1(), stateComponents.v2(), checkpointingInfo.getNext().getPosition(), checkpointingInfo.getLast().getCheckpoint(), @@ -193,7 +193,7 @@ public class DataFrameTransformStats implements Writeable, ToXContentObject { return false; } - DataFrameTransformStats that = (DataFrameTransformStats) other; + TransformStats that = (TransformStats) other; return Objects.equals(this.id, that.id) && Objects.equals(this.state, that.state) @@ -225,11 +225,11 @@ public class DataFrameTransformStats implements Writeable, ToXContentObject { this.node = node; } - public DataFrameIndexerTransformStats 
getIndexerStats() { + public TransformIndexerStats getIndexerStats() { return indexerStats; } - public DataFrameTransformCheckpointingInfo getCheckpointingInfo() { + public TransformCheckpointingInfo getCheckpointingInfo() { return checkpointingInfo; } @@ -250,16 +250,16 @@ public class DataFrameTransformStats implements Writeable, ToXContentObject { return in.readEnum(State.class); } - public static State fromComponents(DataFrameTransformTaskState taskState, IndexerState indexerState) { + public static State fromComponents(TransformTaskState taskState, IndexerState indexerState) { - if (taskState == null || taskState == DataFrameTransformTaskState.STOPPED) { + if (taskState == null || taskState == TransformTaskState.STOPPED) { return STOPPED; - } else if (taskState == DataFrameTransformTaskState.FAILED) { + } else if (taskState == TransformTaskState.FAILED) { return FAILED; } else { // If we get here then the task state must be started, and that means we should have an indexer state - assert(taskState == DataFrameTransformTaskState.STARTED); + assert(taskState == TransformTaskState.STARTED); assert(indexerState != null); switch (indexerState) { @@ -288,25 +288,25 @@ public class DataFrameTransformStats implements Writeable, ToXContentObject { return name().toLowerCase(Locale.ROOT); } - public Tuple toComponents() { + public Tuple toComponents() { switch (this) { case STARTED: - return new Tuple<>(DataFrameTransformTaskState.STARTED, IndexerState.STARTED); + return new Tuple<>(TransformTaskState.STARTED, IndexerState.STARTED); case INDEXING: - return new Tuple<>(DataFrameTransformTaskState.STARTED, IndexerState.INDEXING); + return new Tuple<>(TransformTaskState.STARTED, IndexerState.INDEXING); case ABORTING: - return new Tuple<>(DataFrameTransformTaskState.STARTED, IndexerState.ABORTING); + return new Tuple<>(TransformTaskState.STARTED, IndexerState.ABORTING); case STOPPING: - return new Tuple<>(DataFrameTransformTaskState.STARTED, IndexerState.STOPPING); + return new Tuple<>(TransformTaskState.STARTED, IndexerState.STOPPING); case STOPPED: // This one is not deterministic, because an overall state of STOPPED could arise // from either (STOPPED, null) or (STARTED, STOPPED). However, (STARTED, STOPPED) // is a very short-lived state so it's reasonable to assume the other, especially // as this method is only for mixed version cluster compatibility. 
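// (This pairs with fromComponents() above: a STOPPED or null task state maps straight back to an
// overall state of STOPPED, so the choice made here is read back consistently.)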
- return new Tuple<>(DataFrameTransformTaskState.STOPPED, null); + return new Tuple<>(TransformTaskState.STOPPED, null); case FAILED: - return new Tuple<>(DataFrameTransformTaskState.FAILED, null); + return new Tuple<>(TransformTaskState.FAILED, null); default: throw new IllegalStateException("Unexpected state enum value: " + this); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformStoredDoc.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformStoredDoc.java similarity index 64% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformStoredDoc.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformStoredDoc.java index 8fa7000a659..7d2abe81684 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformStoredDoc.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformStoredDoc.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.core.dataframe.transforms; +package org.elasticsearch.xpack.core.transform.transforms; import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; @@ -16,7 +16,7 @@ import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.xpack.core.dataframe.DataFrameField; +import org.elasticsearch.xpack.core.transform.TransformField; import java.io.IOException; import java.util.Objects; @@ -25,34 +25,34 @@ import java.util.Objects; * A wrapper for grouping transform state and stats when persisting to an index. * Not intended to be returned in endpoint responses. 
*/ -public class DataFrameTransformStoredDoc implements Writeable, ToXContentObject { +public class TransformStoredDoc implements Writeable, ToXContentObject { public static final String NAME = "data_frame_transform_state_and_stats"; public static final ParseField STATE_FIELD = new ParseField("state"); private final String id; - private final DataFrameTransformState transformState; - private final DataFrameIndexerTransformStats transformStats; + private final TransformState transformState; + private final TransformIndexerStats transformStats; - public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( NAME, true, - a -> new DataFrameTransformStoredDoc((String) a[0], - (DataFrameTransformState) a[1], - (DataFrameIndexerTransformStats) a[2])); + a -> new TransformStoredDoc((String) a[0], + (TransformState) a[1], + (TransformIndexerStats) a[2])); static { - PARSER.declareString(ConstructingObjectParser.constructorArg(), DataFrameField.ID); - PARSER.declareObject(ConstructingObjectParser.constructorArg(), DataFrameTransformState.PARSER::apply, STATE_FIELD); - PARSER.declareObject(ConstructingObjectParser.constructorArg(), (p, c) -> DataFrameIndexerTransformStats.fromXContent(p), - DataFrameField.STATS_FIELD); + PARSER.declareString(ConstructingObjectParser.constructorArg(), TransformField.ID); + PARSER.declareObject(ConstructingObjectParser.constructorArg(), TransformState.PARSER::apply, STATE_FIELD); + PARSER.declareObject(ConstructingObjectParser.constructorArg(), (p, c) -> TransformIndexerStats.fromXContent(p), + TransformField.STATS_FIELD); } - public static DataFrameTransformStoredDoc fromXContent(XContentParser parser) throws IOException { + public static TransformStoredDoc fromXContent(XContentParser parser) throws IOException { return PARSER.parse(parser, null); } /** - * Get the persisted state and stats document name from the Data Frame Transform Id. + * Get the persisted state and stats document name from the Transform Id. 
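 * For example (illustrative), a transform with id {@code my-transform} is persisted under the document id
 * {@code data_frame_transform_state_and_stats-my-transform}, since the id is built as {@code NAME + "-" + transformId}.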
* * @return The id of document the where the transform stats are persisted */ @@ -60,28 +60,28 @@ public class DataFrameTransformStoredDoc implements Writeable, ToXContentObject return NAME + "-" + transformId; } - public DataFrameTransformStoredDoc(String id, DataFrameTransformState state, DataFrameIndexerTransformStats stats) { + public TransformStoredDoc(String id, TransformState state, TransformIndexerStats stats) { this.id = Objects.requireNonNull(id); this.transformState = Objects.requireNonNull(state); this.transformStats = Objects.requireNonNull(stats); } - public DataFrameTransformStoredDoc(StreamInput in) throws IOException { + public TransformStoredDoc(StreamInput in) throws IOException { this.id = in.readString(); - this.transformState = new DataFrameTransformState(in); - this.transformStats = new DataFrameIndexerTransformStats(in); + this.transformState = new TransformState(in); + this.transformStats = new TransformIndexerStats(in); if (in.getVersion().before(Version.V_7_4_0)) { - new DataFrameTransformCheckpointingInfo(in); + new TransformCheckpointingInfo(in); } } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - builder.field(DataFrameField.ID.getPreferredName(), id); + builder.field(TransformField.ID.getPreferredName(), id); builder.field(STATE_FIELD.getPreferredName(), transformState, params); - builder.field(DataFrameField.STATS_FIELD.getPreferredName(), transformStats, params); - builder.field(DataFrameField.INDEX_DOC_TYPE.getPreferredName(), NAME); + builder.field(TransformField.STATS_FIELD.getPreferredName(), transformStats, params); + builder.field(TransformField.INDEX_DOC_TYPE.getPreferredName(), NAME); builder.endObject(); return builder; } @@ -92,7 +92,7 @@ public class DataFrameTransformStoredDoc implements Writeable, ToXContentObject transformState.writeTo(out); transformStats.writeTo(out); if (out.getVersion().before(Version.V_7_4_0)) { - DataFrameTransformCheckpointingInfo.EMPTY.writeTo(out); + TransformCheckpointingInfo.EMPTY.writeTo(out); } } @@ -111,7 +111,7 @@ public class DataFrameTransformStoredDoc implements Writeable, ToXContentObject return false; } - DataFrameTransformStoredDoc that = (DataFrameTransformStoredDoc) other; + TransformStoredDoc that = (TransformStoredDoc) other; return Objects.equals(this.id, that.id) && Objects.equals(this.transformState, that.transformState) @@ -122,11 +122,11 @@ public class DataFrameTransformStoredDoc implements Writeable, ToXContentObject return id; } - public DataFrameIndexerTransformStats getTransformStats() { + public TransformIndexerStats getTransformStats() { return transformStats; } - public DataFrameTransformState getTransformState() { + public TransformState getTransformState() { return transformState; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransform.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformTaskParams.java similarity index 75% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransform.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformTaskParams.java index 9fae4816227..7d77978fd65 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransform.java +++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformTaskParams.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.core.dataframe.transforms; +package org.elasticsearch.xpack.core.transform.transforms; import org.elasticsearch.Version; import org.elasticsearch.cluster.AbstractDiffable; @@ -16,41 +16,41 @@ import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.xpack.core.XPackPlugin; -import org.elasticsearch.xpack.core.dataframe.DataFrameField; +import org.elasticsearch.xpack.core.transform.TransformField; import java.io.IOException; import java.util.Objects; -public class DataFrameTransform extends AbstractDiffable implements XPackPlugin.XPackPersistentTaskParams { +public class TransformTaskParams extends AbstractDiffable implements XPackPlugin.XPackPersistentTaskParams { - public static final String NAME = DataFrameField.TASK_NAME; - public static final ParseField FREQUENCY = DataFrameField.FREQUENCY; + public static final String NAME = TransformField.TASK_NAME; + public static final ParseField FREQUENCY = TransformField.FREQUENCY; private final String transformId; private final Version version; private final TimeValue frequency; - public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(NAME, true, - a -> new DataFrameTransform((String) a[0], (String) a[1], (String) a[2])); + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(NAME, true, + a -> new TransformTaskParams((String) a[0], (String) a[1], (String) a[2])); static { - PARSER.declareString(ConstructingObjectParser.constructorArg(), DataFrameField.ID); - PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), DataFrameField.VERSION); + PARSER.declareString(ConstructingObjectParser.constructorArg(), TransformField.ID); + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), TransformField.VERSION); PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), FREQUENCY); } - private DataFrameTransform(String transformId, String version, String frequency) { + private TransformTaskParams(String transformId, String version, String frequency) { this(transformId, version == null ? null : Version.fromString(version), frequency == null ? null : TimeValue.parseTimeValue(frequency, FREQUENCY.getPreferredName())); } - public DataFrameTransform(String transformId, Version version, TimeValue frequency) { + public TransformTaskParams(String transformId, Version version, TimeValue frequency) { this.transformId = transformId; this.version = version == null ? 
Version.V_7_2_0 : version; this.frequency = frequency; } - public DataFrameTransform(StreamInput in) throws IOException { + public TransformTaskParams(StreamInput in) throws IOException { this.transformId = in.readString(); if (in.getVersion().onOrAfter(Version.V_7_3_0)) { this.version = Version.readVersion(in); @@ -88,8 +88,8 @@ public class DataFrameTransform extends AbstractDiffable imp @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - builder.field(DataFrameField.ID.getPreferredName(), transformId); - builder.field(DataFrameField.VERSION.getPreferredName(), version); + builder.field(TransformField.ID.getPreferredName(), transformId); + builder.field(TransformField.VERSION.getPreferredName(), version); if (frequency != null) { builder.field(FREQUENCY.getPreferredName(), frequency.getStringRep()); } @@ -109,7 +109,7 @@ public class DataFrameTransform extends AbstractDiffable imp return frequency; } - public static DataFrameTransform fromXContent(XContentParser parser) throws IOException { + public static TransformTaskParams fromXContent(XContentParser parser) throws IOException { return PARSER.parse(parser, null); } @@ -123,7 +123,7 @@ public class DataFrameTransform extends AbstractDiffable imp return false; } - DataFrameTransform that = (DataFrameTransform) other; + TransformTaskParams that = (TransformTaskParams) other; return Objects.equals(this.transformId, that.transformId) && Objects.equals(this.version, that.version) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformTaskState.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformTaskState.java similarity index 65% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformTaskState.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformTaskState.java index 795daca61ac..e807dafc8b4 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformTaskState.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformTaskState.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.core.dataframe.transforms; +package org.elasticsearch.xpack.core.transform.transforms; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -13,20 +13,20 @@ import org.elasticsearch.common.io.stream.Writeable; import java.io.IOException; import java.util.Locale; -public enum DataFrameTransformTaskState implements Writeable { +public enum TransformTaskState implements Writeable { STOPPED, STARTED, FAILED; - public static DataFrameTransformTaskState fromString(String name) { + public static TransformTaskState fromString(String name) { return valueOf(name.trim().toUpperCase(Locale.ROOT)); } - public static DataFrameTransformTaskState fromStream(StreamInput in) throws IOException { - return in.readEnum(DataFrameTransformTaskState.class); + public static TransformTaskState fromStream(StreamInput in) throws IOException { + return in.readEnum(TransformTaskState.class); } @Override public void writeTo(StreamOutput out) throws IOException { - DataFrameTransformTaskState state = this; + TransformTaskState state = this; out.writeEnum(state); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/AggregationConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/AggregationConfig.java similarity index 90% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/AggregationConfig.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/AggregationConfig.java index 4942f335eee..6617eae7ea6 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/AggregationConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/AggregationConfig.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.core.dataframe.transforms.pivot; +package org.elasticsearch.xpack.core.transform.transforms.pivot; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -22,7 +22,7 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.PipelineAggregationBuilder; -import org.elasticsearch.xpack.core.dataframe.DataFrameMessages; +import org.elasticsearch.xpack.core.transform.TransformMessages; import java.io.IOException; import java.util.Collection; @@ -78,9 +78,9 @@ public class AggregationConfig implements Writeable, ToXContentObject { if (source.isEmpty()) { if (lenient) { - logger.warn(DataFrameMessages.DATA_FRAME_TRANSFORM_CONFIGURATION_PIVOT_NO_AGGREGATION); + logger.warn(TransformMessages.TRANSFORM_CONFIGURATION_PIVOT_NO_AGGREGATION); } else { - throw new IllegalArgumentException(DataFrameMessages.DATA_FRAME_TRANSFORM_CONFIGURATION_PIVOT_NO_AGGREGATION); + throw new IllegalArgumentException(TransformMessages.TRANSFORM_CONFIGURATION_PIVOT_NO_AGGREGATION); } } else { try (XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().map(source); @@ -90,7 +90,7 @@ public class AggregationConfig implements Writeable, ToXContentObject { aggregations = AggregatorFactories.parseAggregators(sourceParser); } catch (Exception e) { if (lenient) { - logger.warn(DataFrameMessages.LOG_DATA_FRAME_TRANSFORM_CONFIGURATION_BAD_AGGREGATION, e); + logger.warn(TransformMessages.LOG_TRANSFORM_CONFIGURATION_BAD_AGGREGATION, e); } else { throw e; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/DateHistogramGroupSource.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/DateHistogramGroupSource.java similarity index 99% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/DateHistogramGroupSource.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/DateHistogramGroupSource.java index 3b856f454c4..c307b9e2a74 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/DateHistogramGroupSource.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/DateHistogramGroupSource.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.core.dataframe.transforms.pivot; +package org.elasticsearch.xpack.core.transform.transforms.pivot; import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/GroupConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/GroupConfig.java similarity index 91% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/GroupConfig.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/GroupConfig.java index 532477c44bd..e97315edc49 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/GroupConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/GroupConfig.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.core.dataframe.transforms.pivot; +package org.elasticsearch.xpack.core.transform.transforms.pivot; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -21,9 +21,9 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.search.aggregations.AggregatorFactories; -import org.elasticsearch.xpack.core.dataframe.DataFrameField; -import org.elasticsearch.xpack.core.dataframe.DataFrameMessages; -import org.elasticsearch.xpack.core.dataframe.utils.ExceptionsHelper; +import org.elasticsearch.xpack.core.transform.TransformField; +import org.elasticsearch.xpack.core.transform.TransformMessages; +import org.elasticsearch.xpack.core.transform.utils.ExceptionsHelper; import java.io.IOException; import java.util.LinkedHashMap; @@ -45,7 +45,7 @@ public class GroupConfig implements Writeable, ToXContentObject { private final Map groups; public GroupConfig(final Map source, final Map groups) { - this.source = ExceptionsHelper.requireNonNull(source, DataFrameField.GROUP_BY.getPreferredName()); + this.source = ExceptionsHelper.requireNonNull(source, TransformField.GROUP_BY.getPreferredName()); this.groups = groups; } @@ -115,9 +115,9 @@ public class GroupConfig implements Writeable, ToXContentObject { if (source.isEmpty()) { if (lenient) { - logger.warn(DataFrameMessages.DATA_FRAME_TRANSFORM_CONFIGURATION_PIVOT_NO_GROUP_BY); + logger.warn(TransformMessages.TRANSFORM_CONFIGURATION_PIVOT_NO_GROUP_BY); } else { - throw new IllegalArgumentException(DataFrameMessages.DATA_FRAME_TRANSFORM_CONFIGURATION_PIVOT_NO_GROUP_BY); + throw new IllegalArgumentException(TransformMessages.TRANSFORM_CONFIGURATION_PIVOT_NO_GROUP_BY); } } else { try (XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().map(source); @@ -126,7 +126,7 @@ public class GroupConfig implements Writeable, ToXContentObject { groups = parseGroupConfig(sourceParser, lenient); } catch (Exception e) { if (lenient) { - logger.warn(DataFrameMessages.LOG_DATA_FRAME_TRANSFORM_CONFIGURATION_BAD_GROUP_BY, e); + logger.warn(TransformMessages.LOG_TRANSFORM_CONFIGURATION_BAD_GROUP_BY, e); } else { throw e; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/HistogramGroupSource.java 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/HistogramGroupSource.java similarity index 98% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/HistogramGroupSource.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/HistogramGroupSource.java index 372f4ad99b6..490096d319b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/HistogramGroupSource.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/HistogramGroupSource.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.core.dataframe.transforms.pivot; +package org.elasticsearch.xpack.core.transform.transforms.pivot; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/PivotConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/PivotConfig.java similarity index 92% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/PivotConfig.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/PivotConfig.java index 038299bfd83..9d040c1180b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/PivotConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/PivotConfig.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.core.dataframe.transforms.pivot; +package org.elasticsearch.xpack.core.transform.transforms.pivot; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; @@ -17,8 +17,8 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.PipelineAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.composite.CompositeAggregationBuilder; -import org.elasticsearch.xpack.core.dataframe.DataFrameField; -import org.elasticsearch.xpack.core.dataframe.utils.ExceptionsHelper; +import org.elasticsearch.xpack.core.transform.TransformField; +import org.elasticsearch.xpack.core.transform.utils.ExceptionsHelper; import java.io.IOException; import java.util.ArrayList; @@ -67,18 +67,18 @@ public class PivotConfig implements Writeable, ToXContentObject { }); parser.declareObject(constructorArg(), - (p, c) -> (GroupConfig.fromXContent(p, lenient)), DataFrameField.GROUP_BY); + (p, c) -> (GroupConfig.fromXContent(p, lenient)), TransformField.GROUP_BY); - parser.declareObject(optionalConstructorArg(), (p, c) -> AggregationConfig.fromXContent(p, lenient), DataFrameField.AGGREGATIONS); - parser.declareObject(optionalConstructorArg(), (p, c) -> AggregationConfig.fromXContent(p, lenient), DataFrameField.AGGS); - parser.declareInt(optionalConstructorArg(), DataFrameField.MAX_PAGE_SEARCH_SIZE); + parser.declareObject(optionalConstructorArg(), (p, c) -> AggregationConfig.fromXContent(p, lenient), TransformField.AGGREGATIONS); + parser.declareObject(optionalConstructorArg(), (p, c) -> AggregationConfig.fromXContent(p, lenient), TransformField.AGGS); + parser.declareInt(optionalConstructorArg(), TransformField.MAX_PAGE_SEARCH_SIZE); return parser; } public PivotConfig(final GroupConfig groups, final AggregationConfig aggregationConfig, Integer maxPageSearchSize) { - this.groups = ExceptionsHelper.requireNonNull(groups, DataFrameField.GROUP_BY.getPreferredName()); - this.aggregationConfig = ExceptionsHelper.requireNonNull(aggregationConfig, DataFrameField.AGGREGATIONS.getPreferredName()); + this.groups = ExceptionsHelper.requireNonNull(groups, TransformField.GROUP_BY.getPreferredName()); + this.aggregationConfig = ExceptionsHelper.requireNonNull(aggregationConfig, TransformField.AGGREGATIONS.getPreferredName()); this.maxPageSearchSize = maxPageSearchSize; } @@ -91,10 +91,10 @@ public class PivotConfig implements Writeable, ToXContentObject { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - builder.field(DataFrameField.GROUP_BY.getPreferredName(), groups); - builder.field(DataFrameField.AGGREGATIONS.getPreferredName(), aggregationConfig); + builder.field(TransformField.GROUP_BY.getPreferredName(), groups); + builder.field(TransformField.AGGREGATIONS.getPreferredName(), aggregationConfig); if (maxPageSearchSize != null) { - builder.field(DataFrameField.MAX_PAGE_SEARCH_SIZE.getPreferredName(), maxPageSearchSize); + builder.field(TransformField.MAX_PAGE_SEARCH_SIZE.getPreferredName(), maxPageSearchSize); } builder.endObject(); return builder; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/SingleGroupSource.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/SingleGroupSource.java similarity index 98% rename from 
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/SingleGroupSource.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/SingleGroupSource.java index ff1f9c3d54a..557b7e3f612 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/SingleGroupSource.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/SingleGroupSource.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.core.dataframe.transforms.pivot; +package org.elasticsearch.xpack.core.transform.transforms.pivot; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/TermsGroupSource.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/TermsGroupSource.java similarity index 97% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/TermsGroupSource.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/TermsGroupSource.java index 891b160da07..e07ff611175 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/TermsGroupSource.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/TermsGroupSource.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.core.dataframe.transforms.pivot; +package org.elasticsearch.xpack.core.transform.transforms.pivot; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.xcontent.ConstructingObjectParser; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/utils/ExceptionsHelper.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/utils/ExceptionsHelper.java similarity index 92% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/utils/ExceptionsHelper.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/utils/ExceptionsHelper.java index 8bfd558b209..4c0f1e12869 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/utils/ExceptionsHelper.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/utils/ExceptionsHelper.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.core.dataframe.utils; +package org.elasticsearch.xpack.core.transform.utils; /** * Collection of methods to aid in creating and checking for exceptions. 
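A note on the wire-compatibility pattern visible in the renamed TransformStoredDoc above: a field that newer nodes no longer track is still consumed on read and written as a placeholder when talking to pre-7.4.0 nodes, so mixed-version clusters keep a stable stream format. Below is a minimal, illustrative sketch of that pattern against the StreamInput/StreamOutput API; the ExampleDoc class and its boolean placeholder field are hypothetical and not part of this change.

import java.io.IOException;

import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;

// Hypothetical illustration of version-gated serialization; not part of this diff.
class ExampleDoc implements Writeable {
    private final String id;

    ExampleDoc(String id) {
        this.id = id;
    }

    ExampleDoc(StreamInput in) throws IOException {
        this.id = in.readString();
        if (in.getVersion().before(Version.V_7_4_0)) {
            // Older nodes still send the removed field; read and discard it so the stream stays aligned.
            in.readBoolean();
        }
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeString(id);
        if (out.getVersion().before(Version.V_7_4_0)) {
            // Older nodes still expect the removed field; write a neutral placeholder.
            out.writeBoolean(false);
        }
    }
}

TransformTaskParams in this same change takes the complementary approach for a field added later: it only reads the version from streams at or above 7.3.0 and otherwise falls back to the 7.2.0 default supplied by its constructor.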
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/utils/DataFrameStrings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/utils/TransformStrings.java similarity index 91% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/utils/DataFrameStrings.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/utils/TransformStrings.java index 22dd3526a24..78f36b5a838 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/utils/DataFrameStrings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/utils/TransformStrings.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.core.dataframe.utils; +package org.elasticsearch.xpack.core.transform.utils; import org.elasticsearch.cluster.metadata.MetaData; @@ -12,7 +12,7 @@ import java.util.regex.Pattern; /** * Yet Another String utilities class. */ -public final class DataFrameStrings { +public final class TransformStrings { /** * Valid user id pattern. @@ -24,7 +24,7 @@ public final class DataFrameStrings { public static final int ID_LENGTH_LIMIT = 64; - private DataFrameStrings() { + private TransformStrings() { } public static boolean isValidId(String id) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleStats.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleStats.java new file mode 100644 index 00000000000..fa018abc6c4 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleStats.java @@ -0,0 +1,383 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.slm; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.metrics.CounterMetric; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentFragment; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.concurrent.ConcurrentHashMap; +import java.util.function.Function; +import java.util.stream.Collectors; + +/** + * SnapshotLifecycleStats contains metrics and stats about snapshot lifecycle policy execution - how + * many snapshots were taken, deleted, how many failures, etc. It contains both global stats + * (snapshots taken, retention runs), and per-policy stats. 
+ */ +public class SnapshotLifecycleStats implements Writeable, ToXContentObject { + + private final CounterMetric retentionRunCount = new CounterMetric(); + private final CounterMetric retentionFailedCount = new CounterMetric(); + private final CounterMetric retentionTimedOut = new CounterMetric(); + private final CounterMetric retentionTimeMs = new CounterMetric(); + private final Map policyStats; + + public static final ParseField RETENTION_RUNS = new ParseField("retention_runs"); + public static final ParseField RETENTION_FAILED = new ParseField("retention_failed"); + public static final ParseField RETENTION_TIMED_OUT = new ParseField("retention_timed_out"); + public static final ParseField RETENTION_TIME = new ParseField("retention_deletion_time"); + public static final ParseField RETENTION_TIME_MILLIS = new ParseField("retention_deletion_time_millis"); + public static final ParseField POLICY_STATS = new ParseField("policy_stats"); + public static final ParseField TOTAL_TAKEN = new ParseField("total_snapshots_taken"); + public static final ParseField TOTAL_FAILED = new ParseField("total_snapshots_failed"); + public static final ParseField TOTAL_DELETIONS = new ParseField("total_snapshots_deleted"); + public static final ParseField TOTAL_DELETION_FAILURES = new ParseField("total_snapshot_deletion_failures"); + + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>("snapshot_policy_stats", true, + a -> { + long runs = (long) a[0]; + long failed = (long) a[1]; + long timedOut = (long) a[2]; + long timeMs = (long) a[3]; + Map policyStatsMap = ((List) a[4]).stream() + .collect(Collectors.toMap(m -> m.policyId, Function.identity())); + return new SnapshotLifecycleStats(runs, failed, timedOut, timeMs, policyStatsMap); + }); + + static { + PARSER.declareLong(ConstructingObjectParser.constructorArg(), RETENTION_RUNS); + PARSER.declareLong(ConstructingObjectParser.constructorArg(), RETENTION_FAILED); + PARSER.declareLong(ConstructingObjectParser.constructorArg(), RETENTION_TIMED_OUT); + PARSER.declareLong(ConstructingObjectParser.constructorArg(), RETENTION_TIME_MILLIS); + PARSER.declareNamedObjects(ConstructingObjectParser.constructorArg(), (p, c, n) -> SnapshotPolicyStats.parse(p, n), POLICY_STATS); + } + + public SnapshotLifecycleStats() { + this.policyStats = new ConcurrentHashMap<>(); + } + + // Package visible for testing + SnapshotLifecycleStats(long retentionRuns, long retentionFailed, long retentionTimedOut, long retentionTimeMs, + Map policyStats) { + this.retentionRunCount.inc(retentionRuns); + this.retentionFailedCount.inc(retentionFailed); + this.retentionTimedOut.inc(retentionTimedOut); + this.retentionTimeMs.inc(retentionTimeMs); + this.policyStats = policyStats; + } + + public SnapshotLifecycleStats(StreamInput in) throws IOException { + this.policyStats = new ConcurrentHashMap<>(in.readMap(StreamInput::readString, SnapshotPolicyStats::new)); + this.retentionRunCount.inc(in.readVLong()); + this.retentionFailedCount.inc(in.readVLong()); + this.retentionTimedOut.inc(in.readVLong()); + this.retentionTimeMs.inc(in.readVLong()); + } + + public static SnapshotLifecycleStats parse(XContentParser parser) { + return PARSER.apply(parser, null); + } + + public SnapshotLifecycleStats merge(SnapshotLifecycleStats other) { + + HashMap newPolicyStats = new HashMap<>(this.policyStats); + // Merges the per-run stats (the stats in "other") with the stats already present + other.policyStats + .forEach((policyId, perRunPolicyStats) 
-> { + newPolicyStats.compute(policyId, (k, existingPolicyMetrics) -> { + if (existingPolicyMetrics == null) { + return perRunPolicyStats; + } else { + return existingPolicyMetrics.merge(perRunPolicyStats); + } + }); + }); + + return new SnapshotLifecycleStats(this.retentionRunCount.count() + other.retentionRunCount.count(), + this.retentionFailedCount.count() + other.retentionFailedCount.count(), + this.retentionTimedOut.count() + other.retentionTimedOut.count(), + this.retentionTimeMs.count() + other.retentionTimeMs.count(), + newPolicyStats); + } + + public SnapshotLifecycleStats removePolicy(String policyId) { + Map policyStats = new HashMap<>(this.policyStats); + policyStats.remove(policyId); + return new SnapshotLifecycleStats(this.retentionRunCount.count(), this.retentionFailedCount.count(), + this.retentionTimedOut.count(), this.retentionTimeMs.count(), + policyStats); + } + + /** + * @return a map of per-policy stats for each SLM policy + */ + public Map getMetrics() { + return Collections.unmodifiableMap(this.policyStats); + } + + /** + * Increment the number of times SLM retention has been run + */ + public void retentionRun() { + this.retentionRunCount.inc(); + } + + /** + * Increment the number of times SLM retention has failed + */ + public void retentionFailed() { + this.retentionFailedCount.inc(); + } + + /** + * Increment the number of times that SLM retention timed out due to the max delete time + * window being exceeded. + */ + public void retentionTimedOut() { + this.retentionTimedOut.inc(); + } + + /** + * Register the amount of time taken for deleting snapshots during SLM retention + */ + public void deletionTime(TimeValue elapsedTime) { + this.retentionTimeMs.inc(elapsedTime.millis()); + } + + /** + * Increment the per-policy snapshot taken count for the given policy id + */ + public void snapshotTaken(String slmPolicy) { + this.policyStats.computeIfAbsent(slmPolicy, SnapshotPolicyStats::new).snapshotTaken(); + } + + /** + * Increment the per-policy snapshot failure count for the given policy id + */ + public void snapshotFailed(String slmPolicy) { + this.policyStats.computeIfAbsent(slmPolicy, SnapshotPolicyStats::new).snapshotFailed(); + } + + /** + * Increment the per-policy snapshot deleted count for the given policy id + */ + public void snapshotDeleted(String slmPolicy) { + this.policyStats.computeIfAbsent(slmPolicy, SnapshotPolicyStats::new).snapshotDeleted(); + } + + /** + * Increment the per-policy snapshot deletion failure count for the given policy id + */ + public void snapshotDeleteFailure(String slmPolicy) { + this.policyStats.computeIfAbsent(slmPolicy, SnapshotPolicyStats::new).snapshotDeleteFailure(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeMap(policyStats, StreamOutput::writeString, (v, o) -> o.writeTo(v)); + out.writeVLong(retentionRunCount.count()); + out.writeVLong(retentionFailedCount.count()); + out.writeVLong(retentionTimedOut.count()); + out.writeVLong(retentionTimeMs.count()); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(RETENTION_RUNS.getPreferredName(), this.retentionRunCount.count()); + builder.field(RETENTION_FAILED.getPreferredName(), this.retentionFailedCount.count()); + builder.field(RETENTION_TIMED_OUT.getPreferredName(), this.retentionTimedOut.count()); + TimeValue retentionTime = TimeValue.timeValueMillis(this.retentionTimeMs.count()); + 
builder.field(RETENTION_TIME.getPreferredName(), retentionTime); + builder.field(RETENTION_TIME_MILLIS.getPreferredName(), retentionTime.millis()); + + Map metrics = getMetrics(); + long totalTaken = metrics.values().stream().mapToLong(s -> s.snapshotsTaken.count()).sum(); + long totalFailed = metrics.values().stream().mapToLong(s -> s.snapshotsFailed.count()).sum(); + long totalDeleted = metrics.values().stream().mapToLong(s -> s.snapshotsDeleted.count()).sum(); + long totalDeleteFailures = metrics.values().stream().mapToLong(s -> s.snapshotDeleteFailures.count()).sum(); + builder.field(TOTAL_TAKEN.getPreferredName(), totalTaken); + builder.field(TOTAL_FAILED.getPreferredName(), totalFailed); + builder.field(TOTAL_DELETIONS.getPreferredName(), totalDeleted); + builder.field(TOTAL_DELETION_FAILURES.getPreferredName(), totalDeleteFailures); + builder.startObject(POLICY_STATS.getPreferredName()); + for (Map.Entry policy : metrics.entrySet()) { + SnapshotPolicyStats perPolicyMetrics = policy.getValue(); + builder.startObject(perPolicyMetrics.policyId); + perPolicyMetrics.toXContent(builder, params); + builder.endObject(); + } + builder.endObject(); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(retentionRunCount.count(), retentionFailedCount.count(), + retentionTimedOut.count(), retentionTimeMs.count(), policyStats); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + SnapshotLifecycleStats other = (SnapshotLifecycleStats) obj; + return Objects.equals(retentionRunCount.count(), other.retentionRunCount.count()) && + Objects.equals(retentionFailedCount.count(), other.retentionFailedCount.count()) && + Objects.equals(retentionTimedOut.count(), other.retentionTimedOut.count()) && + Objects.equals(retentionTimeMs.count(), other.retentionTimeMs.count()) && + Objects.equals(policyStats, other.policyStats); + } + + @Override + public String toString() { + return Strings.toString(this); + } + + public static class SnapshotPolicyStats implements Writeable, ToXContentFragment { + private final String policyId; + private final CounterMetric snapshotsTaken = new CounterMetric(); + private final CounterMetric snapshotsFailed = new CounterMetric(); + private final CounterMetric snapshotsDeleted = new CounterMetric(); + private final CounterMetric snapshotDeleteFailures = new CounterMetric(); + + public static final ParseField SNAPSHOTS_TAKEN = new ParseField("snapshots_taken"); + public static final ParseField SNAPSHOTS_FAILED = new ParseField("snapshots_failed"); + public static final ParseField SNAPSHOTS_DELETED = new ParseField("snapshots_deleted"); + public static final ParseField SNAPSHOT_DELETION_FAILURES = new ParseField("snapshot_deletion_failures"); + + private static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>("snapshot_policy_stats", true, + (a, id) -> { + long taken = (long) a[0]; + long failed = (long) a[1]; + long deleted = (long) a[2]; + long deleteFailed = (long) a[3]; + return new SnapshotPolicyStats(id, taken, failed, deleted, deleteFailed); + }); + + static { + PARSER.declareLong(ConstructingObjectParser.constructorArg(), SNAPSHOTS_TAKEN); + PARSER.declareLong(ConstructingObjectParser.constructorArg(), SNAPSHOTS_FAILED); + PARSER.declareLong(ConstructingObjectParser.constructorArg(), SNAPSHOTS_DELETED); + PARSER.declareLong(ConstructingObjectParser.constructorArg(), SNAPSHOT_DELETION_FAILURES); + } 
+ + public SnapshotPolicyStats(String slmPolicy) { + this.policyId = slmPolicy; + } + + public SnapshotPolicyStats(String policyId, long snapshotsTaken, long snapshotsFailed, long deleted, long failedDeletes) { + this.policyId = policyId; + this.snapshotsTaken.inc(snapshotsTaken); + this.snapshotsFailed.inc(snapshotsFailed); + this.snapshotsDeleted.inc(deleted); + this.snapshotDeleteFailures.inc(failedDeletes); + } + + public SnapshotPolicyStats(StreamInput in) throws IOException { + this.policyId = in.readString(); + this.snapshotsTaken.inc(in.readVLong()); + this.snapshotsFailed.inc(in.readVLong()); + this.snapshotsDeleted.inc(in.readVLong()); + this.snapshotDeleteFailures.inc(in.readVLong()); + } + + public static SnapshotPolicyStats parse(XContentParser parser, String policyId) { + return PARSER.apply(parser, policyId); + } + + public SnapshotPolicyStats merge(SnapshotPolicyStats other) { + return new SnapshotPolicyStats( + this.policyId, + this.snapshotsTaken.count() + other.snapshotsTaken.count(), + this.snapshotsFailed.count() + other.snapshotsFailed.count(), + this.snapshotsDeleted.count() + other.snapshotsDeleted.count(), + this.snapshotDeleteFailures.count() + other.snapshotDeleteFailures.count()); + } + + void snapshotTaken() { + snapshotsTaken.inc(); + } + + void snapshotFailed() { + snapshotsFailed.inc(); + } + + void snapshotDeleted() { + snapshotsDeleted.inc(); + } + + void snapshotDeleteFailure() { + snapshotDeleteFailures.inc(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(policyId); + out.writeVLong(snapshotsTaken.count()); + out.writeVLong(snapshotsFailed.count()); + out.writeVLong(snapshotsDeleted.count()); + out.writeVLong(snapshotDeleteFailures.count()); + } + + @Override + public int hashCode() { + return Objects.hash(policyId, snapshotsTaken.count(), snapshotsFailed.count(), + snapshotsDeleted.count(), snapshotDeleteFailures.count()); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + SnapshotPolicyStats other = (SnapshotPolicyStats) obj; + return Objects.equals(policyId, other.policyId) && + Objects.equals(snapshotsTaken.count(), other.snapshotsTaken.count()) && + Objects.equals(snapshotsFailed.count(), other.snapshotsFailed.count()) && + Objects.equals(snapshotsDeleted.count(), other.snapshotsDeleted.count()) && + Objects.equals(snapshotDeleteFailures.count(), other.snapshotDeleteFailures.count()); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(SnapshotPolicyStats.SNAPSHOTS_TAKEN.getPreferredName(), snapshotsTaken.count()); + builder.field(SnapshotPolicyStats.SNAPSHOTS_FAILED.getPreferredName(), snapshotsFailed.count()); + builder.field(SnapshotPolicyStats.SNAPSHOTS_DELETED.getPreferredName(), snapshotsDeleted.count()); + builder.field(SnapshotPolicyStats.SNAPSHOT_DELETION_FAILURES.getPreferredName(), snapshotDeleteFailures.count()); + return builder; + } + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java index 9fa22b82a8e..fe5950b5a0a 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java @@ -491,4 +491,15 @@ public class XPackLicenseStateTests 
extends ESTestCase { assertAckMesssages(XPackField.SQL, randomTrialOrPlatinumMode(), randomBasicStandardOrGold(), 1); } + public void testTransformBasic() throws Exception { + assertAllowed(BASIC, true, XPackLicenseState::isTransformAllowed, true); + } + + public void testTransformStandard() throws Exception { + assertAllowed(STANDARD, true, XPackLicenseState::isTransformAllowed, true); + } + + public void testTransformInactiveBasic() { + assertAllowed(BASIC, false, XPackLicenseState::isTransformAllowed, false); + } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotShardTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotShardTests.java index 14aae50b3b1..34acf179c3f 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotShardTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotShardTests.java @@ -65,7 +65,7 @@ import org.hamcrest.Matchers; import java.io.IOException; import java.nio.file.Path; -import java.util.Arrays; +import java.util.Collections; import java.util.concurrent.Callable; import java.util.concurrent.ExecutionException; @@ -96,12 +96,13 @@ public class SourceOnlySnapshotShardTests extends IndexShardTestCase { repository.start(); try (Engine.IndexCommitRef snapshotRef = shard.acquireLastIndexCommit(true)) { IndexShardSnapshotStatus indexShardSnapshotStatus = IndexShardSnapshotStatus.newInitializing(); - IllegalStateException illegalStateException = expectThrows(IllegalStateException.class, () -> - runAsSnapshot(shard.getThreadPool(), - () -> repository.snapshotShard(shard.store(), shard.mapperService(), snapshotId, indexId, - snapshotRef.getIndexCommit(), indexShardSnapshotStatus))); - assertEquals("Can't snapshot _source only on an index that has incomplete source ie. has _source disabled or filters the source" - , illegalStateException.getMessage()); + final PlainActionFuture future = PlainActionFuture.newFuture(); + runAsSnapshot(shard.getThreadPool(), () -> repository.snapshotShard(shard.store(), shard.mapperService(), snapshotId, indexId, + snapshotRef.getIndexCommit(), indexShardSnapshotStatus, future)); + IllegalStateException illegalStateException = expectThrows(IllegalStateException.class, future::actionGet); + assertEquals( + "Can't snapshot _source only on an index that has incomplete source ie. 
has _source disabled or filters the source", + illegalStateException.getMessage()); } closeShards(shard); } @@ -120,8 +121,10 @@ public class SourceOnlySnapshotShardTests extends IndexShardTestCase { try (Engine.IndexCommitRef snapshotRef = shard.acquireLastIndexCommit(true)) { IndexShardSnapshotStatus indexShardSnapshotStatus = IndexShardSnapshotStatus.newInitializing(); SnapshotId snapshotId = new SnapshotId("test", "test"); + final PlainActionFuture future = PlainActionFuture.newFuture(); runAsSnapshot(shard.getThreadPool(), () -> repository.snapshotShard(shard.store(), shard.mapperService(), snapshotId, indexId, - snapshotRef.getIndexCommit(), indexShardSnapshotStatus)); + snapshotRef.getIndexCommit(), indexShardSnapshotStatus, future)); + future.actionGet(); IndexShardSnapshotStatus.Copy copy = indexShardSnapshotStatus.asCopy(); assertEquals(copy.getTotalFileCount(), copy.getIncrementalFileCount()); totalFileCount = copy.getTotalFileCount(); @@ -134,8 +137,10 @@ public class SourceOnlySnapshotShardTests extends IndexShardTestCase { SnapshotId snapshotId = new SnapshotId("test_1", "test_1"); IndexShardSnapshotStatus indexShardSnapshotStatus = IndexShardSnapshotStatus.newInitializing(); + final PlainActionFuture future = PlainActionFuture.newFuture(); runAsSnapshot(shard.getThreadPool(), () -> repository.snapshotShard(shard.store(), shard.mapperService(), snapshotId, indexId, - snapshotRef.getIndexCommit(), indexShardSnapshotStatus)); + snapshotRef.getIndexCommit(), indexShardSnapshotStatus, future)); + future.actionGet(); IndexShardSnapshotStatus.Copy copy = indexShardSnapshotStatus.asCopy(); // we processed the segments_N file plus _1.si, _1.fdx, _1.fnm, _1.fdt assertEquals(5, copy.getIncrementalFileCount()); @@ -148,8 +153,10 @@ public class SourceOnlySnapshotShardTests extends IndexShardTestCase { SnapshotId snapshotId = new SnapshotId("test_2", "test_2"); IndexShardSnapshotStatus indexShardSnapshotStatus = IndexShardSnapshotStatus.newInitializing(); + final PlainActionFuture future = PlainActionFuture.newFuture(); runAsSnapshot(shard.getThreadPool(), () -> repository.snapshotShard(shard.store(), shard.mapperService(), snapshotId, indexId, - snapshotRef.getIndexCommit(), indexShardSnapshotStatus)); + snapshotRef.getIndexCommit(), indexShardSnapshotStatus, future)); + future.actionGet(); IndexShardSnapshotStatus.Copy copy = indexShardSnapshotStatus.asCopy(); // we processed the segments_N file plus _1_1.liv assertEquals(2, copy.getIncrementalFileCount()); @@ -193,12 +200,15 @@ public class SourceOnlySnapshotShardTests extends IndexShardTestCase { repository.start(); try (Engine.IndexCommitRef snapshotRef = shard.acquireLastIndexCommit(true)) { IndexShardSnapshotStatus indexShardSnapshotStatus = IndexShardSnapshotStatus.newInitializing(); + final PlainActionFuture future = PlainActionFuture.newFuture(); runAsSnapshot(shard.getThreadPool(), () -> { - repository.initializeSnapshot(snapshotId, Arrays.asList(indexId), - MetaData.builder().put(shard.indexSettings() - .getIndexMetaData(), false).build()); repository.snapshotShard(shard.store(), shard.mapperService(), snapshotId, indexId, snapshotRef.getIndexCommit(), - indexShardSnapshotStatus); + indexShardSnapshotStatus, future); + future.actionGet(); + repository.finalizeSnapshot(snapshotId, Collections.singletonList(indexId), + indexShardSnapshotStatus.asCopy().getStartTime(), null, 1, Collections.emptyList(), + repository.getRepositoryData().getGenId(), true, + MetaData.builder().put(shard.indexSettings().getIndexMetaData(), 
false).build(), Collections.emptyMap()); }); IndexShardSnapshotStatus.Copy copy = indexShardSnapshotStatus.asCopy(); assertEquals(copy.getTotalFileCount(), copy.getIncrementalFileCount()); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/common/notifications/AbstractAuditMessageTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/common/notifications/AbstractAuditMessageTests.java index 3704d56b819..275ae604e69 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/common/notifications/AbstractAuditMessageTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/common/notifications/AbstractAuditMessageTests.java @@ -26,6 +26,11 @@ public class AbstractAuditMessageTests extends AbstractXContentTestCase { - @Override - protected StartDataFrameTransformTaskAction.Request createTestInstance() { - return new StartDataFrameTransformTaskAction.Request(randomAlphaOfLength(4), randomBoolean()); - } - - @Override - protected Writeable.Reader instanceReader() { - return StartDataFrameTransformTaskAction.Request::new; - } -} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformTaskActionResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformTaskActionResponseTests.java deleted file mode 100644 index 62165f87968..00000000000 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformTaskActionResponseTests.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -package org.elasticsearch.xpack.core.dataframe.action; - -import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.test.AbstractWireSerializingTestCase; - -public class StartDataFrameTransformTaskActionResponseTests extends - AbstractWireSerializingTestCase { - @Override - protected StartDataFrameTransformTaskAction.Response createTestInstance() { - return new StartDataFrameTransformTaskAction.Response(randomBoolean()); - } - - @Override - protected Writeable.Reader instanceReader() { - return StartDataFrameTransformTaskAction.Response::new; - } -} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformCheckpointStatsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformCheckpointStatsTests.java deleted file mode 100644 index 4145d773eee..00000000000 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformCheckpointStatsTests.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -package org.elasticsearch.xpack.core.dataframe.transforms; - -import org.elasticsearch.common.io.stream.Writeable.Reader; -import org.elasticsearch.common.xcontent.XContentParser; - -import java.io.IOException; - -public class DataFrameTransformCheckpointStatsTests extends AbstractSerializingDataFrameTestCase -{ - public static DataFrameTransformCheckpointStats randomDataFrameTransformCheckpointStats() { - return new DataFrameTransformCheckpointStats(randomLongBetween(1, 1_000_000), - DataFrameIndexerPositionTests.randomDataFrameIndexerPosition(), - randomBoolean() ? null : DataFrameTransformProgressTests.randomDataFrameTransformProgress(), - randomLongBetween(1, 1_000_000), randomLongBetween(0, 1_000_000)); - } - - @Override - protected DataFrameTransformCheckpointStats doParseInstance(XContentParser parser) throws IOException { - return DataFrameTransformCheckpointStats.fromXContent(parser); - } - - @Override - protected DataFrameTransformCheckpointStats createTestInstance() { - return randomDataFrameTransformCheckpointStats(); - } - - @Override - protected Reader instanceReader() { - return DataFrameTransformCheckpointStats::new; - } -} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformStoredDocTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformStoredDocTests.java deleted file mode 100644 index 3466adf51a1..00000000000 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformStoredDocTests.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -package org.elasticsearch.xpack.core.dataframe.transforms; - -import org.elasticsearch.common.io.stream.Writeable.Reader; -import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.xpack.core.dataframe.DataFrameField; - -import java.io.IOException; -import java.util.Collections; - -public class DataFrameTransformStoredDocTests extends AbstractSerializingDataFrameTestCase { - - protected static ToXContent.Params TO_XCONTENT_PARAMS = new ToXContent.MapParams( - Collections.singletonMap(DataFrameField.FOR_INTERNAL_STORAGE, "true")); - - public static DataFrameTransformStoredDoc randomDataFrameTransformStoredDoc(String id) { - return new DataFrameTransformStoredDoc(id, - DataFrameTransformStateTests.randomDataFrameTransformState(), - DataFrameIndexerTransformStatsTests.randomStats()); - } - - public static DataFrameTransformStoredDoc randomDataFrameTransformStoredDoc() { - return randomDataFrameTransformStoredDoc(randomAlphaOfLengthBetween(1, 10)); - } - - @Override - protected DataFrameTransformStoredDoc doParseInstance(XContentParser parser) throws IOException { - return DataFrameTransformStoredDoc.PARSER.apply(parser, null); - } - - @Override - // Setting params for internal storage so that we can check XContent equivalence as - // DataFrameIndexerTransformStats does not write the ID to the XContentObject unless it is for internal storage - protected ToXContent.Params getToXContentParams() { - return TO_XCONTENT_PARAMS; - } - - @Override - protected DataFrameTransformStoredDoc createTestInstance() { - return randomDataFrameTransformStoredDoc(); - } - - @Override - protected Reader instanceReader() { - return DataFrameTransformStoredDoc::new; - } -} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/SegmentCountStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/SegmentCountStepTests.java index 6c152776794..b15ba2669ec 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/SegmentCountStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/SegmentCountStepTests.java @@ -12,6 +12,7 @@ import org.elasticsearch.action.admin.indices.segments.IndexSegments; import org.elasticsearch.action.admin.indices.segments.IndexShardSegments; import org.elasticsearch.action.admin.indices.segments.IndicesSegmentResponse; import org.elasticsearch.action.admin.indices.segments.ShardSegments; +import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.client.AdminClient; import org.elasticsearch.client.Client; import org.elasticsearch.client.IndicesAdminClient; @@ -130,6 +131,7 @@ public class SegmentCountStepTests extends AbstractStepTestCase indexShards = Collections.singletonMap(0, indexShardSegments); + ShardSegments shardSegmentsOne = Mockito.mock(ShardSegments.class); + ShardSegments[] shardSegmentsArray = new ShardSegments[] { shardSegmentsOne }; + Spliterator iss = indexShards.values().spliterator(); + List segments = new ArrayList<>(); + for (int i = 0; i < maxNumSegments + randomIntBetween(1, 3); i++) { + segments.add(null); + } + Mockito.when(indicesSegmentResponse.getStatus()).thenReturn(RestStatus.OK); + Mockito.when(indicesSegmentResponse.getIndices()).thenReturn(Collections.singletonMap(index.getName(), null)); + Mockito.when(indicesSegmentResponse.getShardFailures()) + .thenReturn(new DefaultShardOperationFailedException[]{new 
DefaultShardOperationFailedException(index.getName(), + 0, new IllegalArgumentException("fake"))}); + Mockito.when(indexSegments.spliterator()).thenReturn(iss); + Mockito.when(indexShardSegments.getShards()).thenReturn(shardSegmentsArray); + Mockito.when(shardSegmentsOne.getSegments()).thenReturn(segments); + + Mockito.when(client.admin()).thenReturn(adminClient); + Mockito.when(adminClient.indices()).thenReturn(indicesClient); + + Step.StepKey stepKey = randomStepKey(); + StepKey nextStepKey = randomStepKey(); + + Mockito.doAnswer(invocationOnMock -> { + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) invocationOnMock.getArguments()[1]; + listener.onResponse(indicesSegmentResponse); + return null; + }).when(indicesClient).segments(any(), any()); + + SetOnce conditionMetResult = new SetOnce<>(); + SetOnce conditionInfo = new SetOnce<>(); + + SegmentCountStep step = new SegmentCountStep(stepKey, nextStepKey, client, maxNumSegments); + step.evaluateCondition(makeMeta(index), new AsyncWaitStep.Listener() { + @Override + public void onResponse(boolean conditionMet, ToXContentObject info) { + conditionMetResult.set(conditionMet); + conditionInfo.set(info); + } + + @Override + public void onFailure(Exception e) { + logger.warn("unexpected onFailure call", e); + throw new AssertionError("unexpected method call: " + e); + } + }); + + assertTrue(conditionMetResult.get()); + assertEquals(new SegmentCountStep.Info(-1L), conditionInfo.get()); + } + public void testThrowsException() { Exception exception = new RuntimeException("error"); Index index = new Index(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20)); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/notifications/AnomalyDetectionAuditMessageTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/notifications/AnomalyDetectionAuditMessageTests.java index f3a12b8a75b..c6a904228b6 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/notifications/AnomalyDetectionAuditMessageTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/notifications/AnomalyDetectionAuditMessageTests.java @@ -8,11 +8,19 @@ package org.elasticsearch.xpack.core.ml.notifications; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractXContentTestCase; import org.elasticsearch.xpack.core.common.notifications.Level; +import org.elasticsearch.xpack.core.ml.job.config.Job; import java.util.Date; +import static org.hamcrest.Matchers.equalTo; + public class AnomalyDetectionAuditMessageTests extends AbstractXContentTestCase { + public void testGetJobType() { + AnomalyDetectionAuditMessage message = createTestInstance(); + assertThat(message.getJobType(), equalTo(Job.ANOMALY_DETECTOR_JOB_TYPE)); + } + @Override protected AnomalyDetectionAuditMessage doParseInstance(XContentParser parser) { return AnomalyDetectionAuditMessage.PARSER.apply(parser, null); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/notifications/DataFrameAnalyticsAuditMessageTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/notifications/DataFrameAnalyticsAuditMessageTests.java new file mode 100644 index 00000000000..139e76160d4 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/notifications/DataFrameAnalyticsAuditMessageTests.java @@ -0,0 +1,43 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.notifications; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; +import org.elasticsearch.xpack.core.common.notifications.Level; + +import java.util.Date; + +import static org.hamcrest.Matchers.equalTo; + +public class DataFrameAnalyticsAuditMessageTests extends AbstractXContentTestCase { + + public void testGetJobType() { + DataFrameAnalyticsAuditMessage message = createTestInstance(); + assertThat(message.getJobType(), equalTo("data_frame_analytics")); + } + + @Override + protected DataFrameAnalyticsAuditMessage doParseInstance(XContentParser parser) { + return DataFrameAnalyticsAuditMessage.PARSER.apply(parser, null); + } + + @Override + protected boolean supportsUnknownFields() { + return true; + } + + @Override + protected DataFrameAnalyticsAuditMessage createTestInstance() { + return new DataFrameAnalyticsAuditMessage( + randomBoolean() ? null : randomAlphaOfLength(10), + randomAlphaOfLengthBetween(1, 20), + randomFrom(Level.values()), + new Date(), + randomBoolean() ? null : randomAlphaOfLengthBetween(1, 20) + ); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetBitsetCacheTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetBitsetCacheTests.java index 4d7d1b4758a..f78b9c2aa6f 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetBitsetCacheTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetBitsetCacheTests.java @@ -15,12 +15,14 @@ import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.NoMergePolicy; +import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; import org.apache.lucene.store.Directory; import org.apache.lucene.util.BitSet; import org.elasticsearch.client.Client; import org.elasticsearch.common.CheckedBiConsumer; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.query.QueryBuilders; @@ -237,9 +239,9 @@ public class DocumentSubsetBitsetCacheTests extends ESTestCase { try (DirectoryReader directoryReader = DirectoryReader.open(directory)) { final LeafReaderContext leaf = directoryReader.leaves().get(0); - final QueryShardContext context = new QueryShardContext(shardId.id(), indexSettings, null, null, null, mapperService, - null, null, xContentRegistry(), writableRegistry(), client, leaf.reader(), () -> nowInMillis, null); - + final QueryShardContext context = new QueryShardContext(shardId.id(), indexSettings, BigArrays.NON_RECYCLING_INSTANCE, + null, null, mapperService, null, null, xContentRegistry(), writableRegistry(), + client, new IndexSearcher(directoryReader), () -> nowInMillis, null); body.accept(context, leaf); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexReaderWrapperIntegrationTests.java 
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexReaderWrapperIntegrationTests.java index 3be46a031a0..8214d327491 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexReaderWrapperIntegrationTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexReaderWrapperIntegrationTests.java @@ -26,6 +26,7 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.IndexSettings; @@ -82,8 +83,9 @@ public class SecurityIndexReaderWrapperIntegrationTests extends AbstractBuilderT Client client = mock(Client.class); when(client.settings()).thenReturn(Settings.EMPTY); final long nowInMillis = randomNonNegativeLong(); - QueryShardContext realQueryShardContext = new QueryShardContext(shardId.id(), indexSettings, null, null, null, mapperService, - null, null, xContentRegistry(), writableRegistry(), client, null, () -> nowInMillis, null); + QueryShardContext realQueryShardContext = new QueryShardContext(shardId.id(), indexSettings, BigArrays.NON_RECYCLING_INSTANCE, + null, null, mapperService, null, null, xContentRegistry(), writableRegistry(), + client, null, () -> nowInMillis, null); QueryShardContext queryShardContext = spy(realQueryShardContext); DocumentSubsetBitsetCache bitsetCache = new DocumentSubsetBitsetCache(Settings.EMPTY); XPackLicenseState licenseState = mock(XPackLicenseState.class); @@ -196,8 +198,9 @@ public class SecurityIndexReaderWrapperIntegrationTests extends AbstractBuilderT Client client = mock(Client.class); when(client.settings()).thenReturn(Settings.EMPTY); final long nowInMillis = randomNonNegativeLong(); - QueryShardContext realQueryShardContext = new QueryShardContext(shardId.id(), indexSettings, null, null, null, mapperService, - null, null, xContentRegistry(), writableRegistry(), client, null, () -> nowInMillis, null); + QueryShardContext realQueryShardContext = new QueryShardContext(shardId.id(), indexSettings, BigArrays.NON_RECYCLING_INSTANCE, + null, null, mapperService, null, null, xContentRegistry(), writableRegistry(), + client, null, () -> nowInMillis, null); QueryShardContext queryShardContext = spy(realQueryShardContext); DocumentSubsetBitsetCache bitsetCache = new DocumentSubsetBitsetCache(Settings.EMPTY); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java index ecedfc0c0e9..f1e7f1a9bfe 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java @@ -49,13 +49,6 @@ import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.xpack.core.action.XPackInfoAction; -import org.elasticsearch.xpack.core.dataframe.action.DeleteDataFrameTransformAction; -import 
org.elasticsearch.xpack.core.dataframe.action.GetDataFrameTransformsAction; -import org.elasticsearch.xpack.core.dataframe.action.GetDataFrameTransformsStatsAction; -import org.elasticsearch.xpack.core.dataframe.action.PreviewDataFrameTransformAction; -import org.elasticsearch.xpack.core.dataframe.action.PutDataFrameTransformAction; -import org.elasticsearch.xpack.core.dataframe.action.StartDataFrameTransformAction; -import org.elasticsearch.xpack.core.dataframe.action.StopDataFrameTransformAction; import org.elasticsearch.xpack.core.ml.MlMetaIndex; import org.elasticsearch.xpack.core.ml.action.CloseJobAction; import org.elasticsearch.xpack.core.ml.action.DeleteCalendarAction; @@ -139,6 +132,13 @@ import org.elasticsearch.xpack.core.security.user.LogstashSystemUser; import org.elasticsearch.xpack.core.security.user.RemoteMonitoringUser; import org.elasticsearch.xpack.core.security.user.SystemUser; import org.elasticsearch.xpack.core.security.user.XPackUser; +import org.elasticsearch.xpack.core.transform.action.DeleteTransformAction; +import org.elasticsearch.xpack.core.transform.action.GetTransformsAction; +import org.elasticsearch.xpack.core.transform.action.GetTransformsStatsAction; +import org.elasticsearch.xpack.core.transform.action.PreviewTransformAction; +import org.elasticsearch.xpack.core.transform.action.PutTransformAction; +import org.elasticsearch.xpack.core.transform.action.StartTransformAction; +import org.elasticsearch.xpack.core.transform.action.StopTransformAction; import org.elasticsearch.xpack.core.watcher.execution.TriggeredWatchStoreField; import org.elasticsearch.xpack.core.watcher.history.HistoryStoreField; import org.elasticsearch.xpack.core.watcher.transport.actions.ack.AckWatchAction; @@ -334,7 +334,12 @@ public class ReservedRolesStoreTests extends ESTestCase { assertThat(kibanaRole.indices().allowedIndicesMatcher(IndexAction.NAME).test(".reporting"), is(false)); assertThat(kibanaRole.indices().allowedIndicesMatcher("indices:foo").test(randomAlphaOfLengthBetween(8, 24)), is(false)); - Arrays.asList(".kibana", ".kibana-devnull", ".reporting-" + randomAlphaOfLength(randomIntBetween(0, 13))).forEach((index) -> { + Arrays.asList( + ".kibana", + ".kibana-devnull", + ".reporting-" + randomAlphaOfLength(randomIntBetween(0, 13)), + ".apm-agent-configuration" + ).forEach((index) -> { logger.info("index name [{}]", index); assertThat(kibanaRole.indices().allowedIndicesMatcher("indices:foo").test(index), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher("indices:bar").test(index), is(true)); @@ -1124,13 +1129,13 @@ public class ReservedRolesStoreTests extends ESTestCase { assertThat(roleDescriptor.getMetadata(), hasEntry("_reserved", true)); Role role = Role.builder(roleDescriptor, null).build(); - assertThat(role.cluster().check(DeleteDataFrameTransformAction.NAME, request, authentication), is(true)); - assertThat(role.cluster().check(GetDataFrameTransformsAction.NAME, request, authentication), is(true)); - assertThat(role.cluster().check(GetDataFrameTransformsStatsAction.NAME, request, authentication), is(true)); - assertThat(role.cluster().check(PreviewDataFrameTransformAction.NAME, request, authentication), is(true)); - assertThat(role.cluster().check(PutDataFrameTransformAction.NAME, request, authentication), is(true)); - assertThat(role.cluster().check(StartDataFrameTransformAction.NAME, request, authentication), is(true)); - assertThat(role.cluster().check(StopDataFrameTransformAction.NAME, request, authentication), is(true)); + 
assertThat(role.cluster().check(DeleteTransformAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(GetTransformsAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(GetTransformsStatsAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(PreviewTransformAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(PutTransformAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(StartTransformAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(StopTransformAction.NAME, request, authentication), is(true)); assertThat(role.cluster().check(DelegatePkiAuthenticationAction.NAME, request, authentication), is(false)); assertThat(role.runAs().check(randomAlphaOfLengthBetween(1, 30)), is(false)); @@ -1163,13 +1168,13 @@ public class ReservedRolesStoreTests extends ESTestCase { assertThat(roleDescriptor.getMetadata(), hasEntry("_reserved", true)); Role role = Role.builder(roleDescriptor, null).build(); - assertThat(role.cluster().check(DeleteDataFrameTransformAction.NAME, request, authentication), is(false)); - assertThat(role.cluster().check(GetDataFrameTransformsAction.NAME, request, authentication), is(true)); - assertThat(role.cluster().check(GetDataFrameTransformsStatsAction.NAME, request, authentication), is(true)); - assertThat(role.cluster().check(PreviewDataFrameTransformAction.NAME, request, authentication), is(false)); - assertThat(role.cluster().check(PutDataFrameTransformAction.NAME, request, authentication), is(false)); - assertThat(role.cluster().check(StartDataFrameTransformAction.NAME, request, authentication), is(false)); - assertThat(role.cluster().check(StopDataFrameTransformAction.NAME, request, authentication), is(false)); + assertThat(role.cluster().check(DeleteTransformAction.NAME, request, authentication), is(false)); + assertThat(role.cluster().check(GetTransformsAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(GetTransformsStatsAction.NAME, request, authentication), is(true)); + assertThat(role.cluster().check(PreviewTransformAction.NAME, request, authentication), is(false)); + assertThat(role.cluster().check(PutTransformAction.NAME, request, authentication), is(false)); + assertThat(role.cluster().check(StartTransformAction.NAME, request, authentication), is(false)); + assertThat(role.cluster().check(StopTransformAction.NAME, request, authentication), is(false)); assertThat(role.cluster().check(DelegatePkiAuthenticationAction.NAME, request, authentication), is(false)); assertThat(role.runAs().check(randomAlphaOfLengthBetween(1, 30)), is(false)); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/slm/SnapshotLifecycleMetadataTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/slm/SnapshotLifecycleMetadataTests.java new file mode 100644 index 00000000000..1e9b1fa7178 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/slm/SnapshotLifecycleMetadataTests.java @@ -0,0 +1,41 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.core.slm; + +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractSerializingTestCase; +import org.elasticsearch.xpack.core.ilm.OperationMode; +import org.elasticsearch.xpack.slm.SnapshotLifecycleStatsTests; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +public class SnapshotLifecycleMetadataTests extends AbstractSerializingTestCase { + @Override + protected SnapshotLifecycleMetadata doParseInstance(XContentParser parser) throws IOException { + return SnapshotLifecycleMetadata.PARSER.apply(parser, null); + } + + @Override + protected SnapshotLifecycleMetadata createTestInstance() { + int policyCount = randomIntBetween(0, 3); + Map policies = new HashMap<>(policyCount); + for (int i = 0; i < policyCount; i++) { + String id = "policy-" + randomAlphaOfLength(3); + policies.put(id, SnapshotLifecyclePolicyMetadataTests.createRandomPolicyMetadata(id)); + } + return new SnapshotLifecycleMetadata(policies, randomFrom(OperationMode.values()), + SnapshotLifecycleStatsTests.randomLifecycleStats()); + } + + @Override + protected Writeable.Reader instanceReader() { + return SnapshotLifecycleMetadata::new; + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicyItemTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicyItemTests.java index 04ab84d5ef9..183b0141caa 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicyItemTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicyItemTests.java @@ -11,8 +11,9 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.test.AbstractWireSerializingTestCase; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.slm.SnapshotLifecycleStatsTests; -import static org.elasticsearch.xpack.core.slm.SnapshotLifecyclePolicyMetadataTests.createRandomPolicy; +import static org.elasticsearch.xpack.core.slm.SnapshotLifecyclePolicyMetadataTests.randomSnapshotLifecyclePolicy; import static org.elasticsearch.xpack.core.slm.SnapshotLifecyclePolicyMetadataTests.createRandomPolicyMetadata; public class SnapshotLifecyclePolicyItemTests extends AbstractWireSerializingTestCase { @@ -27,34 +28,39 @@ public class SnapshotLifecyclePolicyItemTests extends AbstractWireSerializingTes @Override protected SnapshotLifecyclePolicyItem createTestInstance() { - return new SnapshotLifecyclePolicyItem(createRandomPolicyMetadata(randomAlphaOfLengthBetween(5, 10)), randomSnapshotInProgress()); + String policyId = randomAlphaOfLengthBetween(5, 10); + return new SnapshotLifecyclePolicyItem(createRandomPolicyMetadata(policyId), randomSnapshotInProgress(), + SnapshotLifecycleStatsTests.randomPolicyStats(policyId)); } @Override protected SnapshotLifecyclePolicyItem mutateInstance(SnapshotLifecyclePolicyItem instance) { - switch (between(0, 5)) { + switch (between(0, 6)) { case 0: String newPolicyId = randomValueOtherThan(instance.getPolicy().getId(), () -> randomAlphaOfLengthBetween(5, 10)); - return new SnapshotLifecyclePolicyItem(createRandomPolicy(newPolicyId), + return new SnapshotLifecyclePolicyItem(randomSnapshotLifecyclePolicy(newPolicyId), instance.getVersion(), instance.getModifiedDate(), instance.getLastSuccess(), instance.getLastFailure(), - 
instance.getSnapshotInProgress()); + instance.getSnapshotInProgress(), + instance.getPolicyStats()); case 1: return new SnapshotLifecyclePolicyItem(instance.getPolicy(), randomValueOtherThan(instance.getVersion(), ESTestCase::randomNonNegativeLong), instance.getModifiedDate(), instance.getLastSuccess(), instance.getLastFailure(), - instance.getSnapshotInProgress()); + instance.getSnapshotInProgress(), + instance.getPolicyStats()); case 2: return new SnapshotLifecyclePolicyItem(instance.getPolicy(), instance.getVersion(), randomValueOtherThan(instance.getModifiedDate(), ESTestCase::randomNonNegativeLong), instance.getLastSuccess(), instance.getLastFailure(), - instance.getSnapshotInProgress()); + instance.getSnapshotInProgress(), + instance.getPolicyStats()); case 3: return new SnapshotLifecyclePolicyItem(instance.getPolicy(), instance.getVersion(), @@ -62,7 +68,8 @@ public class SnapshotLifecyclePolicyItemTests extends AbstractWireSerializingTes randomValueOtherThan(instance.getLastSuccess(), SnapshotInvocationRecordTests::randomSnapshotInvocationRecord), instance.getLastFailure(), - instance.getSnapshotInProgress()); + instance.getSnapshotInProgress(), + instance.getPolicyStats()); case 4: return new SnapshotLifecyclePolicyItem(instance.getPolicy(), instance.getVersion(), @@ -70,7 +77,8 @@ public class SnapshotLifecyclePolicyItemTests extends AbstractWireSerializingTes instance.getLastSuccess(), randomValueOtherThan(instance.getLastFailure(), SnapshotInvocationRecordTests::randomSnapshotInvocationRecord), - instance.getSnapshotInProgress()); + instance.getSnapshotInProgress(), + instance.getPolicyStats()); case 5: return new SnapshotLifecyclePolicyItem(instance.getPolicy(), instance.getVersion(), @@ -78,7 +86,17 @@ public class SnapshotLifecyclePolicyItemTests extends AbstractWireSerializingTes instance.getLastSuccess(), instance.getLastFailure(), randomValueOtherThan(instance.getSnapshotInProgress(), - SnapshotLifecyclePolicyItemTests::randomSnapshotInProgress)); + SnapshotLifecyclePolicyItemTests::randomSnapshotInProgress), + instance.getPolicyStats()); + case 6: + return new SnapshotLifecyclePolicyItem(instance.getPolicy(), + instance.getVersion(), + instance.getModifiedDate(), + instance.getLastSuccess(), + instance.getLastFailure(), + instance.getSnapshotInProgress(), + randomValueOtherThan(instance.getPolicyStats(), + () -> SnapshotLifecycleStatsTests.randomPolicyStats(instance.getPolicy().getId()))); default: throw new AssertionError("failure, got illegal switch case"); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicyMetadataTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicyMetadataTests.java index 964cfd733b3..cf48615f675 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicyMetadataTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicyMetadataTests.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.core.slm; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractSerializingTestCase; import org.elasticsearch.test.ESTestCase; @@ -50,7 +51,7 @@ public class SnapshotLifecyclePolicyMetadataTests extends AbstractSerializingTes switch (between(0, 5)) { case 0: return SnapshotLifecyclePolicyMetadata.builder(instance) - 
.setPolicy(randomValueOtherThan(instance.getPolicy(), () -> createRandomPolicy(randomAlphaOfLength(10)))) + .setPolicy(randomValueOtherThan(instance.getPolicy(), () -> randomSnapshotLifecyclePolicy(randomAlphaOfLength(10)))) .build(); case 1: return SnapshotLifecyclePolicyMetadata.builder(instance) @@ -81,7 +82,7 @@ public class SnapshotLifecyclePolicyMetadataTests extends AbstractSerializingTes public static SnapshotLifecyclePolicyMetadata createRandomPolicyMetadata(String policyId) { SnapshotLifecyclePolicyMetadata.Builder builder = SnapshotLifecyclePolicyMetadata.builder() - .setPolicy(createRandomPolicy(policyId)) + .setPolicy(randomSnapshotLifecyclePolicy(policyId)) .setVersion(randomNonNegativeLong()) .setModifiedDate(randomNonNegativeLong()); if (randomBoolean()) { @@ -96,7 +97,7 @@ public class SnapshotLifecyclePolicyMetadataTests extends AbstractSerializingTes return builder.build(); } - public static SnapshotLifecyclePolicy createRandomPolicy(String policyId) { + public static SnapshotLifecyclePolicy randomSnapshotLifecyclePolicy(String policyId) { Map config = new HashMap<>(); for (int i = 0; i < randomIntBetween(2, 5); i++) { config.put(randomAlphaOfLength(4), randomAlphaOfLength(4)); @@ -105,10 +106,18 @@ public class SnapshotLifecyclePolicyMetadataTests extends AbstractSerializingTes randomAlphaOfLength(4), randomSchedule(), randomAlphaOfLength(4), - config); + config, + randomRetention()); } - private static String randomSchedule() { + public static SnapshotRetentionConfiguration randomRetention() { + return rarely() ? null : new SnapshotRetentionConfiguration( + rarely() ? null : TimeValue.parseTimeValue(randomTimeValue(), "random retention generation"), + rarely() ? null : randomIntBetween(1, 10), + rarely() ? null : randomIntBetween(15, 30)); + } + + public static String randomSchedule() { return randomIntBetween(0, 59) + " " + randomIntBetween(0, 59) + " " + randomIntBetween(0, 12) + " * * ?"; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/slm/history/SnapshotHistoryStoreTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/slm/history/SnapshotHistoryStoreTests.java index 59dd546cba2..0a6a635490e 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/slm/history/SnapshotHistoryStoreTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/slm/history/SnapshotHistoryStoreTests.java @@ -77,7 +77,7 @@ public class SnapshotHistoryStoreTests extends ESTestCase { final long timestamp = randomNonNegativeLong(); SnapshotLifecyclePolicy.ResolverContext context = new SnapshotLifecyclePolicy.ResolverContext(timestamp); String snapshotId = policy.generateSnapshotName(context); - SnapshotHistoryItem record = SnapshotHistoryItem.successRecord(timestamp, policy, snapshotId); + SnapshotHistoryItem record = SnapshotHistoryItem.creationSuccessRecord(timestamp, policy, snapshotId); client.setVerifier((a, r, l) -> { fail("the history store is disabled, no action should have been taken"); @@ -94,7 +94,7 @@ public class SnapshotHistoryStoreTests extends ESTestCase { SnapshotLifecyclePolicy.ResolverContext context = new SnapshotLifecyclePolicy.ResolverContext(timestamp); String snapshotId = policy.generateSnapshotName(context); { - SnapshotHistoryItem record = SnapshotHistoryItem.successRecord(timestamp, policy, snapshotId); + SnapshotHistoryItem record = SnapshotHistoryItem.creationSuccessRecord(timestamp, policy, snapshotId); AtomicInteger calledTimes = new AtomicInteger(0); client.setVerifier((action, 
request, listener) -> { @@ -132,7 +132,7 @@ public class SnapshotHistoryStoreTests extends ESTestCase { { final String cause = randomAlphaOfLength(9); Exception failureException = new RuntimeException(cause); - SnapshotHistoryItem record = SnapshotHistoryItem.failureRecord(timestamp, policy, snapshotId, failureException); + SnapshotHistoryItem record = SnapshotHistoryItem.creationFailureRecord(timestamp, policy, snapshotId, failureException); AtomicInteger calledTimes = new AtomicInteger(0); client.setVerifier((action, request, listener) -> { @@ -373,7 +373,8 @@ public class SnapshotHistoryStoreTests extends ESTestCase { randomAlphaOfLength(4), randomSchedule(), randomAlphaOfLength(4), - config); + config, + null); } private static String randomSchedule() { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLServiceTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLServiceTests.java index ce043b4597d..ffdc8dace50 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLServiceTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLServiceTests.java @@ -712,7 +712,7 @@ public class SSLServiceTests extends ESTestCase { SSLContext sslContext = sslService.sslContext(sslService.sslConfiguration(Settings.EMPTY)); try (CloseableHttpClient client = HttpClients.custom().setSSLContext(sslContext).build()) { // Execute a GET on a site known to have a valid certificate signed by a trusted public CA - // This will result in a SSLHandshakeException if the SSLContext does not trust the CA, but the default + // This will result in an SSLHandshakeException if the SSLContext does not trust the CA, but the default // truststore trusts all common public CAs so the handshake will succeed privilegedConnect(() -> client.execute(new HttpGet("https://www.elastic.co/")).close()); } @@ -745,7 +745,7 @@ public class SSLServiceTests extends ESTestCase { client.start(); // Execute a GET on a site known to have a valid certificate signed by a trusted public CA - // This will result in a SSLHandshakeException if the SSLContext does not trust the CA, but the default + // This will result in an SSLHandshakeException if the SSLContext does not trust the CA, but the default // truststore trusts all common public CAs so the handshake will succeed client.execute(new HttpHost("elastic.co", 443, "https"), new HttpGet("/"), new AssertionCallback()).get(); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/DataFrameFeatureSetUsageTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/TransformFeatureSetUsageTests.java similarity index 57% rename from x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/DataFrameFeatureSetUsageTests.java rename to x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/TransformFeatureSetUsageTests.java index 13dd2db0a4b..5cf462f50ba 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/DataFrameFeatureSetUsageTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/TransformFeatureSetUsageTests.java @@ -4,33 +4,33 @@ * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.core.dataframe; +package org.elasticsearch.xpack.core.transform; import org.elasticsearch.common.io.stream.Writeable.Reader; import org.elasticsearch.test.AbstractWireSerializingTestCase; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerTransformStatsTests; import org.elasticsearch.xpack.core.indexing.IndexerState; +import org.elasticsearch.xpack.core.transform.transforms.TransformIndexerStatsTests; import java.util.HashMap; import java.util.Map; -public class DataFrameFeatureSetUsageTests extends AbstractWireSerializingTestCase { +public class TransformFeatureSetUsageTests extends AbstractWireSerializingTestCase { @Override - protected DataFrameFeatureSetUsage createTestInstance() { + protected TransformFeatureSetUsage createTestInstance() { Map transformCountByState = new HashMap<>(); if (randomBoolean()) { transformCountByState.put(randomFrom(IndexerState.values()).toString(), randomLong()); } - return new DataFrameFeatureSetUsage(randomBoolean(), randomBoolean(), transformCountByState, - DataFrameIndexerTransformStatsTests.randomStats()); + return new TransformFeatureSetUsage(randomBoolean(), randomBoolean(), transformCountByState, + TransformIndexerStatsTests.randomStats()); } @Override - protected Reader instanceReader() { - return DataFrameFeatureSetUsage::new; + protected Reader instanceReader() { + return TransformFeatureSetUsage::new; } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/DataFrameMessagesTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/TransformMessagesTests.java similarity index 84% rename from x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/DataFrameMessagesTests.java rename to x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/TransformMessagesTests.java index b6284af6c58..c994a1dadb1 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/DataFrameMessagesTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/TransformMessagesTests.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.core.dataframe; +package org.elasticsearch.xpack.core.transform; import org.elasticsearch.test.ESTestCase; @@ -14,16 +14,16 @@ import java.util.ArrayList; import java.util.List; import java.util.Locale; -public class DataFrameMessagesTests extends ESTestCase { +public class TransformMessagesTests extends ESTestCase { public void testGetMessage_WithFormatStrings() { - String formattedMessage = DataFrameMessages.getMessage(DataFrameMessages.REST_STOP_TRANSFORM_WAIT_FOR_COMPLETION_TIMEOUT, "30s", + String formattedMessage = TransformMessages.getMessage(TransformMessages.REST_STOP_TRANSFORM_WAIT_FOR_COMPLETION_TIMEOUT, "30s", "my_transform"); - assertEquals("Timed out after [30s] while waiting for data frame transform [my_transform] to stop", formattedMessage); + assertEquals("Timed out after [30s] while waiting for transform [my_transform] to stop", formattedMessage); } public void testMessageProperFormat() throws IllegalArgumentException, IllegalAccessException { - Field[] declaredFields = DataFrameMessages.class.getFields(); + Field[] declaredFields = TransformMessages.class.getFields(); int checkedMessages = 0; for (Field field : declaredFields) { @@ -31,7 +31,7 @@ public class DataFrameMessagesTests extends ESTestCase { if (java.lang.reflect.Modifier.isStatic(modifiers) && java.lang.reflect.Modifier.isFinal(modifiers) && field.getType().isAssignableFrom(String.class)) { - assertSingleMessage((String) field.get(DataFrameMessages.class)); + assertSingleMessage((String) field.get(TransformMessages.class)); ++checkedMessages; } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/AbstractSerializingDataFrameTestCase.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/action/AbstractSerializingTransformTestCase.java similarity index 78% rename from x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/AbstractSerializingDataFrameTestCase.java rename to x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/action/AbstractSerializingTransformTestCase.java index 14cbdef148c..5a077e3e709 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/AbstractSerializingDataFrameTestCase.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/action/AbstractSerializingTransformTestCase.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.core.dataframe.action; +package org.elasticsearch.xpack.core.transform.action; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.Writeable; @@ -13,17 +13,17 @@ import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.search.SearchModule; import org.elasticsearch.test.AbstractSerializingTestCase; -import org.elasticsearch.xpack.core.dataframe.DataFrameField; -import org.elasticsearch.xpack.core.dataframe.DataFrameNamedXContentProvider; -import org.elasticsearch.xpack.core.dataframe.transforms.SyncConfig; -import org.elasticsearch.xpack.core.dataframe.transforms.TimeSyncConfig; +import org.elasticsearch.xpack.core.transform.TransformField; +import org.elasticsearch.xpack.core.transform.TransformNamedXContentProvider; +import org.elasticsearch.xpack.core.transform.transforms.SyncConfig; +import org.elasticsearch.xpack.core.transform.transforms.TimeSyncConfig; import org.junit.Before; import java.util.List; import static java.util.Collections.emptyList; -public abstract class AbstractSerializingDataFrameTestCase +public abstract class AbstractSerializingTransformTestCase extends AbstractSerializingTestCase { private NamedWriteableRegistry namedWriteableRegistry; @@ -34,11 +34,11 @@ public abstract class AbstractSerializingDataFrameTestCase namedWriteables = searchModule.getNamedWriteables(); - namedWriteables.add(new NamedWriteableRegistry.Entry(SyncConfig.class, DataFrameField.TIME_BASED_SYNC.getPreferredName(), + namedWriteables.add(new NamedWriteableRegistry.Entry(SyncConfig.class, TransformField.TIME_BASED_SYNC.getPreferredName(), TimeSyncConfig::new)); List namedXContents = searchModule.getNamedXContents(); - namedXContents.addAll(new DataFrameNamedXContentProvider().getNamedXContentParsers()); + namedXContents.addAll(new TransformNamedXContentProvider().getNamedXContentParsers()); namedWriteableRegistry = new NamedWriteableRegistry(namedWriteables); namedXContentRegistry = new NamedXContentRegistry(namedXContents); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/AbstractWireSerializingDataFrameTestCase.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/action/AbstractWireSerializingTransformTestCase.java similarity index 78% rename from x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/AbstractWireSerializingDataFrameTestCase.java rename to x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/action/AbstractWireSerializingTransformTestCase.java index 47d7860b71d..5b2ba77973d 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/AbstractWireSerializingDataFrameTestCase.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/action/AbstractWireSerializingTransformTestCase.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.core.dataframe.action; +package org.elasticsearch.xpack.core.transform.action; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.Writeable; @@ -12,17 +12,17 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.search.SearchModule; import org.elasticsearch.test.AbstractWireSerializingTestCase; -import org.elasticsearch.xpack.core.dataframe.DataFrameField; -import org.elasticsearch.xpack.core.dataframe.DataFrameNamedXContentProvider; -import org.elasticsearch.xpack.core.dataframe.transforms.SyncConfig; -import org.elasticsearch.xpack.core.dataframe.transforms.TimeSyncConfig; +import org.elasticsearch.xpack.core.transform.TransformField; +import org.elasticsearch.xpack.core.transform.TransformNamedXContentProvider; +import org.elasticsearch.xpack.core.transform.transforms.SyncConfig; +import org.elasticsearch.xpack.core.transform.transforms.TimeSyncConfig; import org.junit.Before; import java.util.List; import static java.util.Collections.emptyList; -public abstract class AbstractWireSerializingDataFrameTestCase extends AbstractWireSerializingTestCase { +public abstract class AbstractWireSerializingTransformTestCase extends AbstractWireSerializingTestCase { /** * Test case that ensures aggregation named objects are registered */ @@ -34,11 +34,11 @@ public abstract class AbstractWireSerializingDataFrameTestCase namedWriteables = searchModule.getNamedWriteables(); - namedWriteables.add(new NamedWriteableRegistry.Entry(SyncConfig.class, DataFrameField.TIME_BASED_SYNC.getPreferredName(), + namedWriteables.add(new NamedWriteableRegistry.Entry(SyncConfig.class, TransformField.TIME_BASED_SYNC.getPreferredName(), TimeSyncConfig::new)); List namedXContents = searchModule.getNamedXContents(); - namedXContents.addAll(new DataFrameNamedXContentProvider().getNamedXContentParsers()); + namedXContents.addAll(new TransformNamedXContentProvider().getNamedXContentParsers()); namedWriteableRegistry = new NamedWriteableRegistry(namedWriteables); namedXContentRegistry = new NamedXContentRegistry(namedXContents); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/action/DeleteTransformActionRequestTests.java similarity index 70% rename from x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformActionRequestTests.java rename to x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/action/DeleteTransformActionRequestTests.java index 6220a08fb10..8bf27c097b4 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformActionRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/action/DeleteTransformActionRequestTests.java @@ -4,13 +4,13 @@ * you may not use this file except in compliance with the Elastic License. 
 */
-package org.elasticsearch.xpack.core.dataframe.action;
+package org.elasticsearch.xpack.core.transform.action;
 import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.test.AbstractWireSerializingTestCase;
-import org.elasticsearch.xpack.core.dataframe.action.StartDataFrameTransformAction.Request;
+import org.elasticsearch.xpack.core.transform.action.DeleteTransformAction.Request;
-public class StartDataFrameTransformActionRequestTests extends AbstractWireSerializingTestCase<Request> {
+public class DeleteTransformActionRequestTests extends AbstractWireSerializingTestCase<Request> {
 @Override
 protected Request createTestInstance() {
 return new Request(randomAlphaOfLengthBetween(1, 20), randomBoolean());
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/GetDataFrameTransformsActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/action/GetTransformsActionRequestTests.java
similarity index 74%
rename from x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/GetDataFrameTransformsActionRequestTests.java
rename to x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/action/GetTransformsActionRequestTests.java
index 85413d89078..53cb7d01cf2 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/GetDataFrameTransformsActionRequestTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/action/GetTransformsActionRequestTests.java
@@ -4,14 +4,14 @@
 * you may not use this file except in compliance with the Elastic License.
 */
-package org.elasticsearch.xpack.core.dataframe.action;
+package org.elasticsearch.xpack.core.transform.action;
 import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.test.AbstractWireSerializingTestCase;
-import org.elasticsearch.xpack.core.dataframe.action.GetDataFrameTransformsAction.Request;
+import org.elasticsearch.xpack.core.transform.action.GetTransformsAction.Request;
-public class GetDataFrameTransformsActionRequestTests extends AbstractWireSerializingTestCase<Request> {
+public class GetTransformsActionRequestTests extends AbstractWireSerializingTestCase<Request> {
 @Override
 protected Request createTestInstance() {
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/GetDataFrameTransformsActionResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/action/GetTransformsActionResponseTests.java
similarity index 72%
rename from x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/GetDataFrameTransformsActionResponseTests.java
rename to x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/action/GetTransformsActionResponseTests.java
index 4a63ff787ff..8c274e2822d 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/GetDataFrameTransformsActionResponseTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/action/GetTransformsActionResponseTests.java
@@ -4,7 +4,7 @@
 * you may not use this file except in compliance with the Elastic License.
*/ -package org.elasticsearch.xpack.core.dataframe.action; +package org.elasticsearch.xpack.core.transform.action; import org.elasticsearch.common.io.stream.Writeable.Reader; import org.elasticsearch.common.logging.LoggerMessageFormat; @@ -12,9 +12,9 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.support.XContentMapValues; -import org.elasticsearch.xpack.core.dataframe.action.GetDataFrameTransformsAction.Response; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfigTests; +import org.elasticsearch.xpack.core.transform.action.GetTransformsAction.Response; +import org.elasticsearch.xpack.core.transform.transforms.TransformConfig; +import org.elasticsearch.xpack.core.transform.transforms.TransformConfigTests; import org.elasticsearch.xpack.core.watcher.watch.Payload.XContent; import java.io.IOException; @@ -22,15 +22,15 @@ import java.util.ArrayList; import java.util.List; import java.util.Map; -public class GetDataFrameTransformsActionResponseTests extends AbstractWireSerializingDataFrameTestCase { +public class GetTransformsActionResponseTests extends AbstractWireSerializingTransformTestCase { public void testInvalidTransforms() throws IOException { - List transforms = new ArrayList<>(); + List transforms = new ArrayList<>(); - transforms.add(DataFrameTransformConfigTests.randomDataFrameTransformConfig()); - transforms.add(DataFrameTransformConfigTests.randomInvalidDataFrameTransformConfig()); - transforms.add(DataFrameTransformConfigTests.randomDataFrameTransformConfig()); - transforms.add(DataFrameTransformConfigTests.randomInvalidDataFrameTransformConfig()); + transforms.add(TransformConfigTests.randomTransformConfig()); + transforms.add(TransformConfigTests.randomInvalidDataFrameTransformConfig()); + transforms.add(TransformConfigTests.randomTransformConfig()); + transforms.add(TransformConfigTests.randomInvalidDataFrameTransformConfig()); Response r = new Response(transforms, transforms.size()); XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); @@ -46,10 +46,10 @@ public class GetDataFrameTransformsActionResponseTests extends AbstractWireSeria @SuppressWarnings("unchecked") public void testNoHeaderInResponse() throws IOException { - List transforms = new ArrayList<>(); + List transforms = new ArrayList<>(); for (int i = 0; i < randomIntBetween(1, 10); ++i) { - transforms.add(DataFrameTransformConfigTests.randomDataFrameTransformConfig()); + transforms.add(TransformConfigTests.randomTransformConfig()); } Response r = new Response(transforms, transforms.size()); @@ -71,9 +71,9 @@ public class GetDataFrameTransformsActionResponseTests extends AbstractWireSeria @Override protected Response createTestInstance() { - List configs = new ArrayList<>(); + List configs = new ArrayList<>(); for (int i = 0; i < randomInt(10); ++i) { - configs.add(DataFrameTransformConfigTests.randomDataFrameTransformConfig()); + configs.add(TransformConfigTests.randomTransformConfig()); } return new Response(configs, randomNonNegativeLong()); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/GetDataFrameTransformsStatsActionRequestTests.java 
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/action/GetTransformsStatsActionRequestTests.java similarity index 73% rename from x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/GetDataFrameTransformsStatsActionRequestTests.java rename to x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/action/GetTransformsStatsActionRequestTests.java index 3e002688b88..d685d28a5e0 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/GetDataFrameTransformsStatsActionRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/action/GetTransformsStatsActionRequestTests.java @@ -4,14 +4,14 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.core.dataframe.action; +package org.elasticsearch.xpack.core.transform.action; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.test.AbstractWireSerializingTestCase; -import org.elasticsearch.xpack.core.dataframe.action.GetDataFrameTransformsStatsAction.Request; +import org.elasticsearch.xpack.core.transform.action.GetTransformsStatsAction.Request; -public class GetDataFrameTransformsStatsActionRequestTests extends AbstractWireSerializingTestCase { +public class GetTransformsStatsActionRequestTests extends AbstractWireSerializingTestCase { @Override protected Request createTestInstance() { if (randomBoolean()) { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/GetDataFrameTransformsStatsActionResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/action/GetTransformsStatsActionResponseTests.java similarity index 69% rename from x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/GetDataFrameTransformsStatsActionResponseTests.java rename to x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/action/GetTransformsStatsActionResponseTests.java index 007401a4db8..ebb0fbf078a 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/GetDataFrameTransformsStatsActionResponseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/action/GetTransformsStatsActionResponseTests.java @@ -4,26 +4,26 @@ * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.core.dataframe.action; +package org.elasticsearch.xpack.core.transform.action; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.TaskOperationFailure; import org.elasticsearch.common.io.stream.Writeable.Reader; -import org.elasticsearch.xpack.core.dataframe.action.GetDataFrameTransformsStatsAction.Response; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformStats; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformStatsTests; +import org.elasticsearch.xpack.core.transform.action.GetTransformsStatsAction.Response; +import org.elasticsearch.xpack.core.transform.transforms.TransformStats; +import org.elasticsearch.xpack.core.transform.transforms.TransformStatsTests; import java.util.ArrayList; import java.util.List; -public class GetDataFrameTransformsStatsActionResponseTests extends AbstractWireSerializingDataFrameTestCase { +public class GetTransformsStatsActionResponseTests extends AbstractWireSerializingTransformTestCase { @Override protected Response createTestInstance() { - List stats = new ArrayList<>(); + List stats = new ArrayList<>(); int totalStats = randomInt(10); for (int i = 0; i < totalStats; ++i) { - stats.add(DataFrameTransformStatsTests.randomDataFrameTransformStats()); + stats.add(TransformStatsTests.randomDataFrameTransformStats()); } int totalErrors = randomInt(10); List taskFailures = new ArrayList<>(totalErrors); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/PreviewDataFrameTransformActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/action/PreviewTransformActionRequestTests.java similarity index 75% rename from x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/PreviewDataFrameTransformActionRequestTests.java rename to x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/action/PreviewTransformActionRequestTests.java index 5eaf9552492..7083d0ffc1d 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/PreviewDataFrameTransformActionRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/action/PreviewTransformActionRequestTests.java @@ -4,24 +4,24 @@ * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.core.dataframe.action; +package org.elasticsearch.xpack.core.transform.action; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.json.JsonXContent; -import org.elasticsearch.xpack.core.dataframe.action.PreviewDataFrameTransformAction.Request; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfigTests; -import org.elasticsearch.xpack.core.dataframe.transforms.DestConfig; -import org.elasticsearch.xpack.core.dataframe.transforms.pivot.PivotConfigTests; +import org.elasticsearch.xpack.core.transform.action.PreviewTransformAction.Request; +import org.elasticsearch.xpack.core.transform.transforms.TransformConfig; +import org.elasticsearch.xpack.core.transform.transforms.TransformConfigTests; +import org.elasticsearch.xpack.core.transform.transforms.DestConfig; +import org.elasticsearch.xpack.core.transform.transforms.pivot.PivotConfigTests; import java.io.IOException; -import static org.elasticsearch.xpack.core.dataframe.transforms.SourceConfigTests.randomSourceConfig; +import static org.elasticsearch.xpack.core.transform.transforms.SourceConfigTests.randomSourceConfig; -public class PreviewDataFrameTransformActionRequestTests extends AbstractSerializingDataFrameTestCase { +public class PreviewTransformActionRequestTests extends AbstractSerializingTransformTestCase { @Override protected Request doParseInstance(XContentParser parser) throws IOException { @@ -40,12 +40,12 @@ public class PreviewDataFrameTransformActionRequestTests extends AbstractSeriali @Override protected Request createTestInstance() { - DataFrameTransformConfig config = new DataFrameTransformConfig( + TransformConfig config = new TransformConfig( "transform-preview", randomSourceConfig(), new DestConfig("unused-transform-preview-index", null), null, - randomBoolean() ? DataFrameTransformConfigTests.randomSyncConfig() : null, + randomBoolean() ? TransformConfigTests.randomSyncConfig() : null, null, PivotConfigTests.randomPivotConfig(), null); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/PreviewDataFrameTransformsActionResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/action/PreviewTransformsActionResponseTests.java similarity index 89% rename from x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/PreviewDataFrameTransformsActionResponseTests.java rename to x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/action/PreviewTransformsActionResponseTests.java index 2e29c737027..47fb8e3806e 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/PreviewDataFrameTransformsActionResponseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/action/PreviewTransformsActionResponseTests.java @@ -4,12 +4,12 @@ * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.core.dataframe.action; +package org.elasticsearch.xpack.core.transform.action; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractSerializingTestCase; -import org.elasticsearch.xpack.core.dataframe.action.PreviewDataFrameTransformAction.Response; +import org.elasticsearch.xpack.core.transform.action.PreviewTransformAction.Response; import java.io.IOException; import java.util.ArrayList; @@ -18,7 +18,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; -public class PreviewDataFrameTransformsActionResponseTests extends AbstractSerializingTestCase { +public class PreviewTransformsActionResponseTests extends AbstractSerializingTestCase { @Override diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/PreviewDataFrameTransformsActionResponseWireTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/action/PreviewTransformsActionResponseWireTests.java similarity index 78% rename from x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/PreviewDataFrameTransformsActionResponseWireTests.java rename to x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/action/PreviewTransformsActionResponseWireTests.java index df4fe1c14b9..43bd5e39776 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/PreviewDataFrameTransformsActionResponseWireTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/action/PreviewTransformsActionResponseWireTests.java @@ -4,17 +4,17 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.core.dataframe.action; +package org.elasticsearch.xpack.core.transform.action; import org.elasticsearch.common.io.stream.Writeable.Reader; -import org.elasticsearch.xpack.core.dataframe.action.PreviewDataFrameTransformAction.Response; +import org.elasticsearch.xpack.core.transform.action.PreviewTransformAction.Response; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; -public class PreviewDataFrameTransformsActionResponseWireTests extends AbstractWireSerializingDataFrameTestCase { +public class PreviewTransformsActionResponseWireTests extends AbstractWireSerializingTransformTestCase { @Override protected Response createTestInstance() { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/PutDataFrameTransformActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/action/PutTransformActionRequestTests.java similarity index 64% rename from x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/PutDataFrameTransformActionRequestTests.java rename to x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/action/PutTransformActionRequestTests.java index 757a0e44b74..1a84bc87550 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/PutDataFrameTransformActionRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/action/PutTransformActionRequestTests.java @@ -4,26 +4,26 @@ * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.core.dataframe.action; +package org.elasticsearch.xpack.core.transform.action; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.search.SearchModule; import org.elasticsearch.test.AbstractWireSerializingTestCase; -import org.elasticsearch.xpack.core.dataframe.DataFrameField; -import org.elasticsearch.xpack.core.dataframe.action.PutDataFrameTransformAction.Request; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfigTests; -import org.elasticsearch.xpack.core.dataframe.transforms.SyncConfig; -import org.elasticsearch.xpack.core.dataframe.transforms.TimeSyncConfig; +import org.elasticsearch.xpack.core.transform.TransformField; +import org.elasticsearch.xpack.core.transform.action.PutTransformAction.Request; +import org.elasticsearch.xpack.core.transform.transforms.TransformConfig; +import org.elasticsearch.xpack.core.transform.transforms.TransformConfigTests; +import org.elasticsearch.xpack.core.transform.transforms.SyncConfig; +import org.elasticsearch.xpack.core.transform.transforms.TimeSyncConfig; import org.junit.Before; import java.util.List; import static java.util.Collections.emptyList; -public class PutDataFrameTransformActionRequestTests extends AbstractWireSerializingTestCase { +public class PutTransformActionRequestTests extends AbstractWireSerializingTestCase { private String transformId; @Before @@ -38,7 +38,7 @@ public class PutDataFrameTransformActionRequestTests extends AbstractWireSeriali @Override protected Request createTestInstance() { - DataFrameTransformConfig config = DataFrameTransformConfigTests.randomDataFrameTransformConfigWithoutHeaders(transformId); + TransformConfig config = TransformConfigTests.randomDataFrameTransformConfigWithoutHeaders(transformId); return new Request(config, randomBoolean()); } @@ -47,7 +47,7 @@ public class PutDataFrameTransformActionRequestTests extends AbstractWireSeriali SearchModule searchModule = new SearchModule(Settings.EMPTY, false, emptyList()); List namedWriteables = searchModule.getNamedWriteables(); - namedWriteables.add(new NamedWriteableRegistry.Entry(SyncConfig.class, DataFrameField.TIME_BASED_SYNC.getPreferredName(), + namedWriteables.add(new NamedWriteableRegistry.Entry(SyncConfig.class, TransformField.TIME_BASED_SYNC.getPreferredName(), TimeSyncConfig::new)); return new NamedWriteableRegistry(namedWriteables); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/DeleteDataFrameTransformActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/action/StartTransformActionRequestTests.java similarity index 61% rename from x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/DeleteDataFrameTransformActionRequestTests.java rename to x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/action/StartTransformActionRequestTests.java index e1b2c338acb..8a7933bcddb 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/DeleteDataFrameTransformActionRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/action/StartTransformActionRequestTests.java @@ -4,16 +4,16 @@ * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.core.dataframe.action; +package org.elasticsearch.xpack.core.transform.action; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.test.AbstractWireSerializingTestCase; -import org.elasticsearch.xpack.core.dataframe.action.DeleteDataFrameTransformAction.Request; +import org.elasticsearch.xpack.core.transform.action.StartTransformAction.Request; -public class DeleteDataFrameTransformActionRequestTests extends AbstractWireSerializingTestCase { +public class StartTransformActionRequestTests extends AbstractWireSerializingTestCase { @Override protected Request createTestInstance() { - return new Request(randomAlphaOfLengthBetween(1, 20), randomBoolean()); + return new Request(randomAlphaOfLengthBetween(1, 20)); } @Override diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/StopDataFrameTransformActionResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/action/StartTransformActionResponseTests.java similarity index 66% rename from x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/StopDataFrameTransformActionResponseTests.java rename to x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/action/StartTransformActionResponseTests.java index afd5b6fa2db..7eaaf45ad5a 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/StopDataFrameTransformActionResponseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/action/StartTransformActionResponseTests.java @@ -4,12 +4,12 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.core.dataframe.action; +package org.elasticsearch.xpack.core.transform.action; import org.elasticsearch.common.io.stream.Writeable.Reader; -import org.elasticsearch.xpack.core.dataframe.action.StopDataFrameTransformAction.Response; +import org.elasticsearch.xpack.core.transform.action.StartTransformAction.Response; -public class StopDataFrameTransformActionResponseTests extends AbstractWireSerializingDataFrameTestCase { +public class StartTransformActionResponseTests extends AbstractWireSerializingTransformTestCase { @Override protected Response createTestInstance() { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/StopDataFrameTransformActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/action/StopTransformActionRequestTests.java similarity index 87% rename from x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/StopDataFrameTransformActionRequestTests.java rename to x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/action/StopTransformActionRequestTests.java index 7ad89094b19..84fe9560bab 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/StopDataFrameTransformActionRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/action/StopTransformActionRequestTests.java @@ -4,21 +4,21 @@ * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.core.dataframe.action; +package org.elasticsearch.xpack.core.transform.action; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.test.AbstractWireSerializingTestCase; -import org.elasticsearch.xpack.core.dataframe.DataFrameField; -import org.elasticsearch.xpack.core.dataframe.action.StopDataFrameTransformAction.Request; +import org.elasticsearch.xpack.core.transform.TransformField; +import org.elasticsearch.xpack.core.transform.action.StopTransformAction.Request; import java.util.Arrays; import java.util.Collections; import java.util.HashSet; -public class StopDataFrameTransformActionRequestTests extends AbstractWireSerializingTestCase { +public class StopTransformActionRequestTests extends AbstractWireSerializingTestCase { @Override protected Request createTestInstance() { @@ -52,7 +52,7 @@ public class StopDataFrameTransformActionRequestTests extends AbstractWireSerial String dataFrameId = "dataframe-id"; Task dataFrameTask = new Task(1L, "persistent", "action", - DataFrameField.PERSISTENT_TASK_DESCRIPTION_PREFIX + dataFrameId, + TransformField.PERSISTENT_TASK_DESCRIPTION_PREFIX + dataFrameId, TaskId.EMPTY_TASK_ID, Collections.emptyMap()); Request request = new Request("unrelated", false, false, null, false); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformActionResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/action/StopTransformActionResponseTests.java similarity index 66% rename from x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformActionResponseTests.java rename to x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/action/StopTransformActionResponseTests.java index d2cd377ffba..90810adbb17 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformActionResponseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/action/StopTransformActionResponseTests.java @@ -4,12 +4,12 @@ * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.core.dataframe.action; +package org.elasticsearch.xpack.core.transform.action; import org.elasticsearch.common.io.stream.Writeable.Reader; -import org.elasticsearch.xpack.core.dataframe.action.StartDataFrameTransformAction.Response; +import org.elasticsearch.xpack.core.transform.action.StopTransformAction.Response; -public class StartDataFrameTransformActionResponseTests extends AbstractWireSerializingDataFrameTestCase { +public class StopTransformActionResponseTests extends AbstractWireSerializingTransformTestCase { @Override protected Response createTestInstance() { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/UpdateDataFrameTransformActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/action/UpdateTransformActionRequestTests.java similarity index 59% rename from x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/UpdateDataFrameTransformActionRequestTests.java rename to x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/action/UpdateTransformActionRequestTests.java index 3aa34dae067..cc79b01038b 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/UpdateDataFrameTransformActionRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/action/UpdateTransformActionRequestTests.java @@ -4,14 +4,14 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.core.dataframe.action; +package org.elasticsearch.xpack.core.transform.action; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.xpack.core.dataframe.action.UpdateDataFrameTransformAction.Request; +import org.elasticsearch.xpack.core.transform.action.UpdateTransformAction.Request; -import static org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfigUpdateTests.randomDataFrameTransformConfigUpdate; +import static org.elasticsearch.xpack.core.transform.transforms.TransformConfigUpdateTests.randomDataFrameTransformConfigUpdate; -public class UpdateDataFrameTransformActionRequestTests extends AbstractWireSerializingDataFrameTestCase { +public class UpdateTransformActionRequestTests extends AbstractWireSerializingTransformTestCase { @Override protected Writeable.Reader instanceReader() { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/UpdateDataFrameTransformsActionResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/action/UpdateTransformsActionResponseTests.java similarity index 51% rename from x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/UpdateDataFrameTransformsActionResponseTests.java rename to x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/action/UpdateTransformsActionResponseTests.java index 6dffbc89302..035c964bd1f 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/UpdateDataFrameTransformsActionResponseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/action/UpdateTransformsActionResponseTests.java @@ -4,21 +4,21 @@ * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.core.dataframe.action; +package org.elasticsearch.xpack.core.transform.action; import org.elasticsearch.common.io.stream.Writeable.Reader; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.xpack.core.dataframe.action.UpdateDataFrameTransformAction.Response; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfigTests; +import org.elasticsearch.xpack.core.transform.action.UpdateTransformAction.Response; +import org.elasticsearch.xpack.core.transform.transforms.TransformConfig; +import org.elasticsearch.xpack.core.transform.transforms.TransformConfigTests; import java.io.IOException; -public class UpdateDataFrameTransformsActionResponseTests extends AbstractSerializingDataFrameTestCase { +public class UpdateTransformsActionResponseTests extends AbstractSerializingTransformTestCase { @Override protected Response createTestInstance() { - return new Response(DataFrameTransformConfigTests.randomDataFrameTransformConfigWithoutHeaders()); + return new Response(TransformConfigTests.randomDataFrameTransformConfigWithoutHeaders()); } @Override @@ -28,6 +28,6 @@ public class UpdateDataFrameTransformsActionResponseTests extends AbstractSerial @Override protected Response doParseInstance(XContentParser parser) throws IOException { - return new Response(DataFrameTransformConfig.fromXContent(parser, null, false)); + return new Response(TransformConfig.fromXContent(parser, null, false)); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/notifications/DataFrameAuditMessageTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/notifications/TransformAuditMessageTests.java similarity index 58% rename from x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/notifications/DataFrameAuditMessageTests.java rename to x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/notifications/TransformAuditMessageTests.java index c39d61ab9d9..360632fe155 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/notifications/DataFrameAuditMessageTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/notifications/TransformAuditMessageTests.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.core.dataframe.notifications; +package org.elasticsearch.xpack.core.transform.notifications; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractXContentTestCase; @@ -11,11 +11,18 @@ import org.elasticsearch.xpack.core.common.notifications.Level; import java.util.Date; -public class DataFrameAuditMessageTests extends AbstractXContentTestCase { +import static org.hamcrest.Matchers.nullValue; + +public class TransformAuditMessageTests extends AbstractXContentTestCase { + + public void testGetJobType() { + TransformAuditMessage message = createTestInstance(); + assertThat(message.getJobType(), nullValue()); + } @Override - protected DataFrameAuditMessage doParseInstance(XContentParser parser) { - return DataFrameAuditMessage.PARSER.apply(parser, null); + protected TransformAuditMessage doParseInstance(XContentParser parser) { + return TransformAuditMessage.PARSER.apply(parser, null); } @Override @@ -24,8 +31,8 @@ public class DataFrameAuditMessageTests extends AbstractXContentTestCase +public abstract class AbstractSerializingTransformTestCase extends AbstractSerializingTestCase { protected static Params TO_XCONTENT_PARAMS = new ToXContent.MapParams( - Collections.singletonMap(DataFrameField.FOR_INTERNAL_STORAGE, "true")); + Collections.singletonMap(TransformField.FOR_INTERNAL_STORAGE, "true")); /** * Test case that ensures aggregation named objects are registered @@ -49,7 +49,7 @@ public abstract class AbstractSerializingDataFrameTestCase namedXContents = searchModule.getNamedXContents(); @@ -57,7 +57,7 @@ public abstract class AbstractSerializingDataFrameTestCase MockDeprecatedQueryBuilder.fromXContent(p))); namedXContents.add(new NamedXContentRegistry.Entry(BaseAggregationBuilder.class, new ParseField(MockDeprecatedAggregationBuilder.NAME), (p, c) -> MockDeprecatedAggregationBuilder.fromXContent(p))); - namedXContents.addAll(new DataFrameNamedXContentProvider().getNamedXContentParsers()); + namedXContents.addAll(new TransformNamedXContentProvider().getNamedXContentParsers()); namedWriteableRegistry = new NamedWriteableRegistry(namedWriteables); namedXContentRegistry = new NamedXContentRegistry(namedXContents); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DestConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/DestConfigTests.java similarity index 88% rename from x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DestConfigTests.java rename to x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/DestConfigTests.java index 094267ba4ad..ffbed1390b4 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DestConfigTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/DestConfigTests.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.core.dataframe.transforms; +package org.elasticsearch.xpack.core.transform.transforms; import org.elasticsearch.common.io.stream.Writeable.Reader; import org.elasticsearch.common.xcontent.XContentParser; @@ -12,7 +12,7 @@ import org.junit.Before; import java.io.IOException; -public class DestConfigTests extends AbstractSerializingDataFrameTestCase { +public class DestConfigTests extends AbstractSerializingTransformTestCase { private boolean lenient; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/MockDeprecatedAggregationBuilder.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/MockDeprecatedAggregationBuilder.java similarity index 86% rename from x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/MockDeprecatedAggregationBuilder.java rename to x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/MockDeprecatedAggregationBuilder.java index fd7fe91db98..03de07f2475 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/MockDeprecatedAggregationBuilder.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/MockDeprecatedAggregationBuilder.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.core.dataframe.transforms; +package org.elasticsearch.xpack.core.transform.transforms; import org.apache.logging.log4j.LogManager; import org.elasticsearch.common.io.stream.StreamInput; @@ -12,6 +12,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; @@ -21,7 +22,6 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuil import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import org.elasticsearch.search.aggregations.support.ValuesSourceType; -import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; import java.util.Map; @@ -65,8 +65,10 @@ public class MockDeprecatedAggregationBuilder extends ValuesSourceAggregationBui } @Override - protected ValuesSourceAggregatorFactory innerBuild(SearchContext context, ValuesSourceConfig config, - AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException { + protected ValuesSourceAggregatorFactory innerBuild(QueryShardContext queryShardContext, + ValuesSourceConfig config, + AggregatorFactory parent, + Builder subFactoriesBuilder) throws IOException { return null; } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/MockDeprecatedQueryBuilder.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/MockDeprecatedQueryBuilder.java similarity index 97% rename from x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/MockDeprecatedQueryBuilder.java rename to 
x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/MockDeprecatedQueryBuilder.java index 147c72d2cb7..71235409145 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/MockDeprecatedQueryBuilder.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/MockDeprecatedQueryBuilder.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.core.dataframe.transforms; +package org.elasticsearch.xpack.core.transform.transforms; import org.apache.logging.log4j.LogManager; import org.apache.lucene.search.Query; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/NodeAttributeTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/NodeAttributeTests.java similarity index 96% rename from x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/NodeAttributeTests.java rename to x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/NodeAttributeTests.java index fdc7692c412..15daf281757 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/NodeAttributeTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/NodeAttributeTests.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.core.dataframe.transforms; +package org.elasticsearch.xpack.core.transform.transforms; import org.elasticsearch.common.io.stream.Writeable.Reader; import org.elasticsearch.common.xcontent.XContentParser; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/QueryConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/QueryConfigTests.java similarity index 97% rename from x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/QueryConfigTests.java rename to x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/QueryConfigTests.java index b967dcf06e8..9086f9901a5 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/QueryConfigTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/QueryConfigTests.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.core.dataframe.transforms; +package org.elasticsearch.xpack.core.transform.transforms; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.bytes.BytesReference; @@ -25,7 +25,7 @@ import org.junit.Before; import java.io.IOException; import java.util.LinkedHashMap; -public class QueryConfigTests extends AbstractSerializingDataFrameTestCase { +public class QueryConfigTests extends AbstractSerializingTransformTestCase { private boolean lenient; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/SourceConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/SourceConfigTests.java similarity index 92% rename from x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/SourceConfigTests.java rename to x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/SourceConfigTests.java index b8eee446f37..972bb9cc831 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/SourceConfigTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/SourceConfigTests.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.core.dataframe.transforms; +package org.elasticsearch.xpack.core.transform.transforms; import org.elasticsearch.common.io.stream.Writeable.Reader; import org.elasticsearch.common.xcontent.XContentParser; @@ -13,7 +13,7 @@ import org.junit.Before; import java.io.IOException; import java.util.function.Predicate; -public class SourceConfigTests extends AbstractSerializingDataFrameTestCase { +public class SourceConfigTests extends AbstractSerializingTransformTestCase { private boolean lenient; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/TimeSyncConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TimeSyncConfigTests.java similarity index 91% rename from x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/TimeSyncConfigTests.java rename to x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TimeSyncConfigTests.java index f6775712c12..a9c18bed7e9 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/TimeSyncConfigTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TimeSyncConfigTests.java @@ -4,13 +4,12 @@ * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.core.dataframe.transforms; +package org.elasticsearch.xpack.core.transform.transforms; import org.elasticsearch.common.io.stream.Writeable.Reader; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractSerializingTestCase; -import org.elasticsearch.xpack.core.dataframe.transforms.TimeSyncConfig; import java.io.IOException; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformCheckpointStatsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformCheckpointStatsTests.java new file mode 100644 index 00000000000..8039758b373 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformCheckpointStatsTests.java @@ -0,0 +1,37 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.transform.transforms; + +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; + +public class TransformCheckpointStatsTests extends AbstractSerializingTransformTestCase +{ + public static TransformCheckpointStats randomDataFrameTransformCheckpointStats() { + return new TransformCheckpointStats(randomLongBetween(1, 1_000_000), + TransformIndexerPositionTests.randomTransformIndexerPosition(), + randomBoolean() ? null : TransformProgressTests.randomTransformProgress(), + randomLongBetween(1, 1_000_000), randomLongBetween(0, 1_000_000)); + } + + @Override + protected TransformCheckpointStats doParseInstance(XContentParser parser) throws IOException { + return TransformCheckpointStats.fromXContent(parser); + } + + @Override + protected TransformCheckpointStats createTestInstance() { + return randomDataFrameTransformCheckpointStats(); + } + + @Override + protected Reader instanceReader() { + return TransformCheckpointStats::new; + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformCheckpointTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformCheckpointTests.java similarity index 62% rename from x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformCheckpointTests.java rename to x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformCheckpointTests.java index 298b018ce45..f70f6c68e01 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformCheckpointTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformCheckpointTests.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.core.dataframe.transforms; +package org.elasticsearch.xpack.core.transform.transforms; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.Writeable.Reader; @@ -20,30 +20,30 @@ import java.util.TreeMap; import static org.elasticsearch.test.TestMatchers.matchesPattern; -public class DataFrameTransformCheckpointTests extends AbstractSerializingDataFrameTestCase { +public class TransformCheckpointTests extends AbstractSerializingTransformTestCase { - public static DataFrameTransformCheckpoint randomDataFrameTransformCheckpoints() { - return new DataFrameTransformCheckpoint(randomAlphaOfLengthBetween(1, 10), randomNonNegativeLong(), randomNonNegativeLong(), + public static TransformCheckpoint randomTransformCheckpoints() { + return new TransformCheckpoint(randomAlphaOfLengthBetween(1, 10), randomNonNegativeLong(), randomNonNegativeLong(), randomCheckpointsByIndex(), randomNonNegativeLong()); } @Override - protected DataFrameTransformCheckpoint doParseInstance(XContentParser parser) throws IOException { - return DataFrameTransformCheckpoint.fromXContent(parser, false); + protected TransformCheckpoint doParseInstance(XContentParser parser) throws IOException { + return TransformCheckpoint.fromXContent(parser, false); } @Override - protected DataFrameTransformCheckpoint createTestInstance() { - return randomDataFrameTransformCheckpoints(); + protected TransformCheckpoint createTestInstance() { + return randomTransformCheckpoints(); } @Override - protected Reader instanceReader() { - return DataFrameTransformCheckpoint::new; + protected Reader instanceReader() { + return TransformCheckpoint::new; } public void testXContentForInternalStorage() throws IOException { - DataFrameTransformCheckpoint dataFrameTransformCheckpoints = randomDataFrameTransformCheckpoints(); + TransformCheckpoint dataFrameTransformCheckpoints = randomTransformCheckpoints(); try (XContentBuilder xContentBuilder = XContentFactory.jsonBuilder()) { XContentBuilder content = dataFrameTransformCheckpoints.toXContent(xContentBuilder, getToXContentParams()); @@ -62,12 +62,12 @@ public class DataFrameTransformCheckpointTests extends AbstractSerializingDataFr otherCheckpointsByIndex.put(randomAlphaOfLengthBetween(1, 10), new long[] { 1, 2, 3 }); long timeUpperBound = randomNonNegativeLong(); - DataFrameTransformCheckpoint dataFrameTransformCheckpoints = new DataFrameTransformCheckpoint(id, timestamp, checkpoint, + TransformCheckpoint dataFrameTransformCheckpoints = new TransformCheckpoint(id, timestamp, checkpoint, checkpointsByIndex, timeUpperBound); // same assertTrue(dataFrameTransformCheckpoints.matches(dataFrameTransformCheckpoints)); - DataFrameTransformCheckpoint dataFrameTransformCheckpointsCopy = copyInstance(dataFrameTransformCheckpoints); + TransformCheckpoint dataFrameTransformCheckpointsCopy = copyInstance(dataFrameTransformCheckpoints); // with copy assertTrue(dataFrameTransformCheckpoints.matches(dataFrameTransformCheckpointsCopy)); @@ -75,19 +75,19 @@ public class DataFrameTransformCheckpointTests extends AbstractSerializingDataFr // other id assertFalse(dataFrameTransformCheckpoints - .matches(new DataFrameTransformCheckpoint(id + "-1", timestamp, checkpoint, checkpointsByIndex, timeUpperBound))); + .matches(new TransformCheckpoint(id + "-1", timestamp, checkpoint, checkpointsByIndex, timeUpperBound))); // other timestamp assertTrue(dataFrameTransformCheckpoints - .matches(new DataFrameTransformCheckpoint(id, (timestamp / 2) + 1, checkpoint, 
checkpointsByIndex, timeUpperBound))); + .matches(new TransformCheckpoint(id, (timestamp / 2) + 1, checkpoint, checkpointsByIndex, timeUpperBound))); // other checkpoint assertTrue(dataFrameTransformCheckpoints - .matches(new DataFrameTransformCheckpoint(id, timestamp, (checkpoint / 2) + 1, checkpointsByIndex, timeUpperBound))); + .matches(new TransformCheckpoint(id, timestamp, (checkpoint / 2) + 1, checkpointsByIndex, timeUpperBound))); // other index checkpoints assertFalse(dataFrameTransformCheckpoints - .matches(new DataFrameTransformCheckpoint(id, timestamp, checkpoint, otherCheckpointsByIndex, timeUpperBound))); + .matches(new TransformCheckpoint(id, timestamp, checkpoint, otherCheckpointsByIndex, timeUpperBound))); // other time upper bound assertTrue(dataFrameTransformCheckpoints - .matches(new DataFrameTransformCheckpoint(id, timestamp, checkpoint, checkpointsByIndex, (timeUpperBound / 2) + 1))); + .matches(new TransformCheckpoint(id, timestamp, checkpoint, checkpointsByIndex, (timeUpperBound / 2) + 1))); } public void testGetBehind() { @@ -119,53 +119,53 @@ public class DataFrameTransformCheckpointTests extends AbstractSerializingDataFr long checkpoint = randomLongBetween(10, 100); - DataFrameTransformCheckpoint checkpointOld = new DataFrameTransformCheckpoint( + TransformCheckpoint checkpointOld = new TransformCheckpoint( id, timestamp, checkpoint, checkpointsByIndexOld, 0L); - DataFrameTransformCheckpoint checkpointTransientNew = new DataFrameTransformCheckpoint( + TransformCheckpoint checkpointTransientNew = new TransformCheckpoint( id, timestamp, -1L, checkpointsByIndexNew, 0L); - DataFrameTransformCheckpoint checkpointNew = new DataFrameTransformCheckpoint( + TransformCheckpoint checkpointNew = new TransformCheckpoint( id, timestamp, checkpoint + 1, checkpointsByIndexNew, 0L); - DataFrameTransformCheckpoint checkpointOlderButNewerShardsCheckpoint = new DataFrameTransformCheckpoint( + TransformCheckpoint checkpointOlderButNewerShardsCheckpoint = new TransformCheckpoint( id, timestamp, checkpoint - 1, checkpointsByIndexNew, 0L); - assertEquals(indices * shards * 10L, DataFrameTransformCheckpoint.getBehind(checkpointOld, checkpointTransientNew)); - assertEquals(indices * shards * 10L, DataFrameTransformCheckpoint.getBehind(checkpointOld, checkpointNew)); + assertEquals(indices * shards * 10L, TransformCheckpoint.getBehind(checkpointOld, checkpointTransientNew)); + assertEquals(indices * shards * 10L, TransformCheckpoint.getBehind(checkpointOld, checkpointNew)); // no difference for same checkpoints, transient or not - assertEquals(0L, DataFrameTransformCheckpoint.getBehind(checkpointOld, checkpointOld)); - assertEquals(0L, DataFrameTransformCheckpoint.getBehind(checkpointTransientNew, checkpointTransientNew)); - assertEquals(0L, DataFrameTransformCheckpoint.getBehind(checkpointNew, checkpointNew)); + assertEquals(0L, TransformCheckpoint.getBehind(checkpointOld, checkpointOld)); + assertEquals(0L, TransformCheckpoint.getBehind(checkpointTransientNew, checkpointTransientNew)); + assertEquals(0L, TransformCheckpoint.getBehind(checkpointNew, checkpointNew)); // new vs transient new: ok - assertEquals(0L, DataFrameTransformCheckpoint.getBehind(checkpointNew, checkpointTransientNew)); + assertEquals(0L, TransformCheckpoint.getBehind(checkpointNew, checkpointTransientNew)); // transient new vs new: illegal Exception e = expectThrows(IllegalArgumentException.class, - () -> DataFrameTransformCheckpoint.getBehind(checkpointTransientNew, checkpointNew)); + () -> 
TransformCheckpoint.getBehind(checkpointTransientNew, checkpointNew)); assertEquals("can not compare transient against a non transient checkpoint", e.getMessage()); // new vs old: illegal - e = expectThrows(IllegalArgumentException.class, () -> DataFrameTransformCheckpoint.getBehind(checkpointNew, checkpointOld)); + e = expectThrows(IllegalArgumentException.class, () -> TransformCheckpoint.getBehind(checkpointNew, checkpointOld)); assertEquals("old checkpoint is newer than new checkpoint", e.getMessage()); // corner case: the checkpoint appears older but the inner shard checkpoints are newer - assertEquals(-1L, DataFrameTransformCheckpoint.getBehind(checkpointOlderButNewerShardsCheckpoint, checkpointOld)); + assertEquals(-1L, TransformCheckpoint.getBehind(checkpointOlderButNewerShardsCheckpoint, checkpointOld)); // test cases where indices sets do not match // remove something from old, so newer has 1 index more than old: should be equivalent to old index existing but empty checkpointsByIndexOld.remove(checkpointsByIndexOld.firstKey()); - long behind = DataFrameTransformCheckpoint.getBehind(checkpointOld, checkpointTransientNew); + long behind = TransformCheckpoint.getBehind(checkpointOld, checkpointTransientNew); assertTrue("Expected behind (" + behind + ") => sum of shard checkpoint differences (" + indices * shards * 10L + ")", behind >= indices * shards * 10L); // remove same key: old and new should have equal indices again checkpointsByIndexNew.remove(checkpointsByIndexNew.firstKey()); - assertEquals((indices - 1) * shards * 10L, DataFrameTransformCheckpoint.getBehind(checkpointOld, checkpointTransientNew)); + assertEquals((indices - 1) * shards * 10L, TransformCheckpoint.getBehind(checkpointOld, checkpointTransientNew)); // remove 1st index from new, now old has 1 index more, which should be ignored checkpointsByIndexNew.remove(checkpointsByIndexNew.firstKey()); - assertEquals((indices - 2) * shards * 10L, DataFrameTransformCheckpoint.getBehind(checkpointOld, checkpointTransientNew)); + assertEquals((indices - 2) * shards * 10L, TransformCheckpoint.getBehind(checkpointOld, checkpointTransientNew)); } private static Map randomCheckpointsByIndex() { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformCheckpointingInfoTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformCheckpointingInfoTests.java similarity index 53% rename from x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformCheckpointingInfoTests.java rename to x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformCheckpointingInfoTests.java index 18ccc142bd6..a7f271dc800 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformCheckpointingInfoTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformCheckpointingInfoTests.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.core.dataframe.transforms; +package org.elasticsearch.xpack.core.transform.transforms; import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.BytesStreamOutput; @@ -15,35 +15,35 @@ import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; import java.time.Instant; -public class DataFrameTransformCheckpointingInfoTests extends AbstractSerializingDataFrameTestCase { +public class TransformCheckpointingInfoTests extends AbstractSerializingTransformTestCase { - public static DataFrameTransformCheckpointingInfo randomDataFrameTransformCheckpointingInfo() { - return new DataFrameTransformCheckpointingInfo( - DataFrameTransformCheckpointStatsTests.randomDataFrameTransformCheckpointStats(), - DataFrameTransformCheckpointStatsTests.randomDataFrameTransformCheckpointStats(), + public static TransformCheckpointingInfo randomDataFrameTransformCheckpointingInfo() { + return new TransformCheckpointingInfo( + TransformCheckpointStatsTests.randomDataFrameTransformCheckpointStats(), + TransformCheckpointStatsTests.randomDataFrameTransformCheckpointStats(), randomNonNegativeLong(), randomBoolean() ? null : Instant.ofEpochMilli(randomLongBetween(1, 100000))); } @Override - protected DataFrameTransformCheckpointingInfo doParseInstance(XContentParser parser) throws IOException { - return DataFrameTransformCheckpointingInfo.fromXContent(parser); + protected TransformCheckpointingInfo doParseInstance(XContentParser parser) throws IOException { + return TransformCheckpointingInfo.fromXContent(parser); } @Override - protected DataFrameTransformCheckpointingInfo createTestInstance() { + protected TransformCheckpointingInfo createTestInstance() { return randomDataFrameTransformCheckpointingInfo(); } @Override - protected Reader instanceReader() { - return DataFrameTransformCheckpointingInfo::new; + protected Reader instanceReader() { + return TransformCheckpointingInfo::new; } public void testBackwardsSerialization() throws IOException { - DataFrameTransformCheckpointingInfo checkpointingInfo = new DataFrameTransformCheckpointingInfo( - DataFrameTransformCheckpointStats.EMPTY, - DataFrameTransformCheckpointStats.EMPTY, + TransformCheckpointingInfo checkpointingInfo = new TransformCheckpointingInfo( + TransformCheckpointStats.EMPTY, + TransformCheckpointStats.EMPTY, randomNonNegativeLong(), // changesLastDetectedAt is not serialized to past values, so when it is pulled back in, it will be null null); @@ -52,7 +52,7 @@ public class DataFrameTransformCheckpointingInfoTests extends AbstractSerializin checkpointingInfo.writeTo(output); try (StreamInput in = output.bytes().streamInput()) { in.setVersion(Version.V_7_4_0); - DataFrameTransformCheckpointingInfo streamedCheckpointingInfo = new DataFrameTransformCheckpointingInfo(in); + TransformCheckpointingInfo streamedCheckpointingInfo = new TransformCheckpointingInfo(in); assertEquals(checkpointingInfo, streamedCheckpointingInfo); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformConfigTests.java similarity index 78% rename from x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformConfigTests.java rename to x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformConfigTests.java index 849ff1629c7..030f9c24ff5 100644 --- 
a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformConfigTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformConfigTests.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.core.dataframe.transforms; +package org.elasticsearch.xpack.core.transform.transforms; import org.elasticsearch.Version; import org.elasticsearch.common.Strings; @@ -16,7 +16,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.xpack.core.dataframe.transforms.pivot.PivotConfigTests; +import org.elasticsearch.xpack.core.transform.transforms.pivot.PivotConfigTests; import org.junit.Before; import java.io.IOException; @@ -25,26 +25,26 @@ import java.util.HashMap; import java.util.Map; import static org.elasticsearch.test.TestMatchers.matchesPattern; -import static org.elasticsearch.xpack.core.dataframe.transforms.DestConfigTests.randomDestConfig; -import static org.elasticsearch.xpack.core.dataframe.transforms.SourceConfigTests.randomInvalidSourceConfig; -import static org.elasticsearch.xpack.core.dataframe.transforms.SourceConfigTests.randomSourceConfig; +import static org.elasticsearch.xpack.core.transform.transforms.DestConfigTests.randomDestConfig; +import static org.elasticsearch.xpack.core.transform.transforms.SourceConfigTests.randomInvalidSourceConfig; +import static org.elasticsearch.xpack.core.transform.transforms.SourceConfigTests.randomSourceConfig; import static org.hamcrest.Matchers.equalTo; -public class DataFrameTransformConfigTests extends AbstractSerializingDataFrameTestCase { +public class TransformConfigTests extends AbstractSerializingTransformTestCase { private String transformId; private boolean runWithHeaders; - public static DataFrameTransformConfig randomDataFrameTransformConfigWithoutHeaders() { + public static TransformConfig randomDataFrameTransformConfigWithoutHeaders() { return randomDataFrameTransformConfigWithoutHeaders(randomAlphaOfLengthBetween(1, 10)); } - public static DataFrameTransformConfig randomDataFrameTransformConfig() { - return randomDataFrameTransformConfig(randomAlphaOfLengthBetween(1, 10)); + public static TransformConfig randomTransformConfig() { + return randomTransformConfig(randomAlphaOfLengthBetween(1, 10)); } - public static DataFrameTransformConfig randomDataFrameTransformConfigWithoutHeaders(String id) { - return new DataFrameTransformConfig(id, + public static TransformConfig randomDataFrameTransformConfigWithoutHeaders(String id) { + return new TransformConfig(id, randomSourceConfig(), randomDestConfig(), randomBoolean() ? null : TimeValue.timeValueMillis(randomIntBetween(1_000, 3_600_000)), @@ -56,8 +56,8 @@ public class DataFrameTransformConfigTests extends AbstractSerializingDataFrameT null); } - public static DataFrameTransformConfig randomDataFrameTransformConfig(String id) { - return new DataFrameTransformConfig(id, + public static TransformConfig randomTransformConfig(String id) { + return new TransformConfig(id, randomSourceConfig(), randomDestConfig(), randomBoolean() ? null : TimeValue.timeValueMillis(randomIntBetween(1_000, 3_600_000)), @@ -69,13 +69,13 @@ public class DataFrameTransformConfigTests extends AbstractSerializingDataFrameT randomBoolean() ? 
null : Version.CURRENT.toString()); } - public static DataFrameTransformConfig randomInvalidDataFrameTransformConfig() { + public static TransformConfig randomInvalidDataFrameTransformConfig() { if (randomBoolean()) { - return new DataFrameTransformConfig(randomAlphaOfLengthBetween(1, 10), randomInvalidSourceConfig(), randomDestConfig(), + return new TransformConfig(randomAlphaOfLengthBetween(1, 10), randomInvalidSourceConfig(), randomDestConfig(), null, randomBoolean() ? randomSyncConfig() : null, randomHeaders(), PivotConfigTests.randomPivotConfig(), randomBoolean() ? null : randomAlphaOfLengthBetween(1, 1000)); } // else - return new DataFrameTransformConfig(randomAlphaOfLengthBetween(1, 10), randomSourceConfig(), randomDestConfig(), + return new TransformConfig(randomAlphaOfLengthBetween(1, 10), randomSourceConfig(), randomDestConfig(), null, randomBoolean() ? randomSyncConfig() : null, randomHeaders(), PivotConfigTests.randomInvalidPivotConfig(), randomBoolean() ? null : randomAlphaOfLengthBetween(1, 1000)); } @@ -91,22 +91,22 @@ public class DataFrameTransformConfigTests extends AbstractSerializingDataFrameT } @Override - protected DataFrameTransformConfig doParseInstance(XContentParser parser) throws IOException { + protected TransformConfig doParseInstance(XContentParser parser) throws IOException { if (randomBoolean()) { - return DataFrameTransformConfig.fromXContent(parser, transformId, runWithHeaders); + return TransformConfig.fromXContent(parser, transformId, runWithHeaders); } else { - return DataFrameTransformConfig.fromXContent(parser, null, runWithHeaders); + return TransformConfig.fromXContent(parser, null, runWithHeaders); } } @Override - protected DataFrameTransformConfig createTestInstance() { - return runWithHeaders ? randomDataFrameTransformConfig(transformId) : randomDataFrameTransformConfigWithoutHeaders(transformId); + protected TransformConfig createTestInstance() { + return runWithHeaders ? 
randomTransformConfig(transformId) : randomDataFrameTransformConfigWithoutHeaders(transformId); } @Override - protected Reader instanceReader() { - return DataFrameTransformConfig::new; + protected Reader instanceReader() { + return TransformConfig::new; } @Override @@ -137,7 +137,7 @@ public class DataFrameTransformConfigTests extends AbstractSerializingDataFrameT + " \"field\": \"points\"" + "} } } } }"; - DataFrameTransformConfig dataFrameTransformConfig = createDataFrameTransformConfigFromString(pivotTransform, "test_match_all"); + TransformConfig dataFrameTransformConfig = createDataFrameTransformConfigFromString(pivotTransform, "test_match_all"); assertNotNull(dataFrameTransformConfig.getSource().getQueryConfig()); assertTrue(dataFrameTransformConfig.getSource().getQueryConfig().isValid()); @@ -213,7 +213,7 @@ public class DataFrameTransformConfigTests extends AbstractSerializingDataFrameT } public void testXContentForInternalStorage() throws IOException { - DataFrameTransformConfig dataFrameTransformConfig = randomDataFrameTransformConfig(); + TransformConfig dataFrameTransformConfig = randomTransformConfig(); try (XContentBuilder xContentBuilder = XContentFactory.jsonBuilder()) { XContentBuilder content = dataFrameTransformConfig.toXContent(xContentBuilder, getToXContentParams()); @@ -231,11 +231,11 @@ public class DataFrameTransformConfigTests extends AbstractSerializingDataFrameT } public void testMaxLengthDescription() { - IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> new DataFrameTransformConfig("id", + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> new TransformConfig("id", randomSourceConfig(), randomDestConfig(), null, null, null, PivotConfigTests.randomPivotConfig(), randomAlphaOfLength(1001))); assertThat(exception.getMessage(), equalTo("[description] must be less than 1000 characters in length.")); String description = randomAlphaOfLength(1000); - DataFrameTransformConfig config = new DataFrameTransformConfig("id", + TransformConfig config = new TransformConfig("id", randomSourceConfig(), randomDestConfig(), null, null, null, PivotConfigTests.randomPivotConfig(), description); assertThat(description, equalTo(config.getDescription())); } @@ -257,7 +257,7 @@ public class DataFrameTransformConfigTests extends AbstractSerializingDataFrameT + " \"field\": \"points\"" + "} } } } }"; - DataFrameTransformConfig dataFrameTransformConfig = createDataFrameTransformConfigFromString(pivotTransform, "body_id"); + TransformConfig dataFrameTransformConfig = createDataFrameTransformConfigFromString(pivotTransform, "body_id"); assertEquals("body_id", dataFrameTransformConfig.getId()); IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, @@ -268,9 +268,9 @@ public class DataFrameTransformConfigTests extends AbstractSerializingDataFrameT } - private DataFrameTransformConfig createDataFrameTransformConfigFromString(String json, String id) throws IOException { + private TransformConfig createDataFrameTransformConfigFromString(String json, String id) throws IOException { final XContentParser parser = XContentType.JSON.xContent().createParser(xContentRegistry(), DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json); - return DataFrameTransformConfig.fromXContent(parser, id, false); + return TransformConfig.fromXContent(parser, id, false); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformConfigUpdateTests.java 
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformConfigUpdateTests.java similarity index 71% rename from x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformConfigUpdateTests.java rename to x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformConfigUpdateTests.java index 1cc7574be19..b6ecff6bc5d 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformConfigUpdateTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformConfigUpdateTests.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.core.dataframe.transforms; +package org.elasticsearch.xpack.core.transform.transforms; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.Version; @@ -14,22 +14,22 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.QueryBuilder; -import org.elasticsearch.xpack.core.dataframe.transforms.pivot.PivotConfigTests; +import org.elasticsearch.xpack.core.transform.transforms.pivot.PivotConfigTests; import java.io.IOException; import java.time.Instant; import java.util.Collections; import java.util.Map; -import static org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfigTests.randomDataFrameTransformConfig; -import static org.elasticsearch.xpack.core.dataframe.transforms.DestConfigTests.randomDestConfig; -import static org.elasticsearch.xpack.core.dataframe.transforms.SourceConfigTests.randomSourceConfig; +import static org.elasticsearch.xpack.core.transform.transforms.TransformConfigTests.randomTransformConfig; +import static org.elasticsearch.xpack.core.transform.transforms.DestConfigTests.randomDestConfig; +import static org.elasticsearch.xpack.core.transform.transforms.SourceConfigTests.randomSourceConfig; import static org.hamcrest.Matchers.equalTo; -public class DataFrameTransformConfigUpdateTests extends AbstractSerializingDataFrameTestCase { +public class TransformConfigUpdateTests extends AbstractSerializingTransformTestCase { - public static DataFrameTransformConfigUpdate randomDataFrameTransformConfigUpdate() { - return new DataFrameTransformConfigUpdate( + public static TransformConfigUpdate randomDataFrameTransformConfigUpdate() { + return new TransformConfigUpdate( randomBoolean() ? null : randomSourceConfig(), randomBoolean() ? null : randomDestConfig(), randomBoolean() ? 
null : TimeValue.timeValueMillis(randomIntBetween(1_000, 3_600_000)), @@ -42,33 +42,33 @@ public class DataFrameTransformConfigUpdateTests extends AbstractSerializingData } @Override - protected DataFrameTransformConfigUpdate doParseInstance(XContentParser parser) throws IOException { - return DataFrameTransformConfigUpdate.fromXContent(parser); + protected TransformConfigUpdate doParseInstance(XContentParser parser) throws IOException { + return TransformConfigUpdate.fromXContent(parser); } @Override - protected DataFrameTransformConfigUpdate createTestInstance() { + protected TransformConfigUpdate createTestInstance() { return randomDataFrameTransformConfigUpdate(); } @Override - protected Reader instanceReader() { - return DataFrameTransformConfigUpdate::new; + protected Reader instanceReader() { + return TransformConfigUpdate::new; } public void testIsNoop() { for (int i = 0; i < NUMBER_OF_TEST_RUNS; i++) { - DataFrameTransformConfig config = randomDataFrameTransformConfig(); - DataFrameTransformConfigUpdate update = new DataFrameTransformConfigUpdate(null, null, null, null, null); + TransformConfig config = randomTransformConfig(); + TransformConfigUpdate update = new TransformConfigUpdate(null, null, null, null, null); assertTrue("null update is not noop", update.isNoop(config)); - update = new DataFrameTransformConfigUpdate(config.getSource(), + update = new TransformConfigUpdate(config.getSource(), config.getDestination(), config.getFrequency(), config.getSyncConfig(), config.getDescription()); assertTrue("equal update is not noop", update.isNoop(config)); - update = new DataFrameTransformConfigUpdate(config.getSource(), + update = new TransformConfigUpdate(config.getSource(), config.getDestination(), config.getFrequency(), config.getSyncConfig(), @@ -78,7 +78,7 @@ public class DataFrameTransformConfigUpdateTests extends AbstractSerializingData } public void testApply() { - DataFrameTransformConfig config = new DataFrameTransformConfig("time-transform", + TransformConfig config = new TransformConfig("time-transform", randomSourceConfig(), randomDestConfig(), TimeValue.timeValueMillis(randomIntBetween(1_000, 3_600_000)), @@ -88,7 +88,7 @@ public class DataFrameTransformConfigUpdateTests extends AbstractSerializingData randomBoolean() ? null : randomAlphaOfLengthBetween(1, 1000), randomBoolean() ? null : Instant.now(), randomBoolean() ? 
null : Version.V_7_2_0.toString()); - DataFrameTransformConfigUpdate update = new DataFrameTransformConfigUpdate(null, null, null, null, null); + TransformConfigUpdate update = new TransformConfigUpdate(null, null, null, null, null); assertThat(config, equalTo(update.apply(config))); SourceConfig sourceConfig = new SourceConfig("the_new_index"); @@ -96,11 +96,11 @@ public class DataFrameTransformConfigUpdateTests extends AbstractSerializingData TimeValue frequency = TimeValue.timeValueSeconds(10); SyncConfig syncConfig = new TimeSyncConfig("time_field", TimeValue.timeValueSeconds(30)); String newDescription = "new description"; - update = new DataFrameTransformConfigUpdate(sourceConfig, destConfig, frequency, syncConfig, newDescription); + update = new TransformConfigUpdate(sourceConfig, destConfig, frequency, syncConfig, newDescription); Map headers = Collections.singletonMap("foo", "bar"); update.setHeaders(headers); - DataFrameTransformConfig updatedConfig = update.apply(config); + TransformConfig updatedConfig = update.apply(config); assertThat(updatedConfig.getSource(), equalTo(sourceConfig)); assertThat(updatedConfig.getDestination(), equalTo(destConfig)); @@ -112,7 +112,7 @@ public class DataFrameTransformConfigUpdateTests extends AbstractSerializingData } public void testApplyWithSyncChange() { - DataFrameTransformConfig batchConfig = new DataFrameTransformConfig("batch-transform", + TransformConfig batchConfig = new TransformConfig("batch-transform", randomSourceConfig(), randomDestConfig(), TimeValue.timeValueMillis(randomIntBetween(1_000, 3_600_000)), @@ -123,7 +123,7 @@ public class DataFrameTransformConfigUpdateTests extends AbstractSerializingData randomBoolean() ? null : Instant.now(), randomBoolean() ? null : Version.CURRENT.toString()); - DataFrameTransformConfigUpdate update = new DataFrameTransformConfigUpdate(null, + TransformConfigUpdate update = new TransformConfigUpdate(null, null, null, TimeSyncConfigTests.randomTimeSyncConfig(), @@ -133,7 +133,7 @@ public class DataFrameTransformConfigUpdateTests extends AbstractSerializingData assertThat(ex.getMessage(), equalTo("Cannot change the current sync configuration of transform [batch-transform] from [null] to [time]")); - DataFrameTransformConfig timeSyncedConfig = new DataFrameTransformConfig("time-transform", + TransformConfig timeSyncedConfig = new TransformConfig("time-transform", randomSourceConfig(), randomDestConfig(), TimeValue.timeValueMillis(randomIntBetween(1_000, 3_600_000)), @@ -144,7 +144,7 @@ public class DataFrameTransformConfigUpdateTests extends AbstractSerializingData randomBoolean() ? null : Instant.now(), randomBoolean() ? 
null : Version.CURRENT.toString()); - DataFrameTransformConfigUpdate fooSyncUpdate = new DataFrameTransformConfigUpdate(null, + TransformConfigUpdate fooSyncUpdate = new TransformConfigUpdate(null, null, null, new FooSync(), @@ -163,12 +163,12 @@ public class DataFrameTransformConfigUpdateTests extends AbstractSerializingData } @Override - public QueryBuilder getRangeQuery(DataFrameTransformCheckpoint newCheckpoint) { + public QueryBuilder getRangeQuery(TransformCheckpoint newCheckpoint) { return null; } @Override - public QueryBuilder getRangeQuery(DataFrameTransformCheckpoint oldCheckpoint, DataFrameTransformCheckpoint newCheckpoint) { + public QueryBuilder getRangeQuery(TransformCheckpoint oldCheckpoint, TransformCheckpoint newCheckpoint) { return null; } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameIndexerPositionTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformIndexerPositionTests.java similarity index 69% rename from x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameIndexerPositionTests.java rename to x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformIndexerPositionTests.java index dd57a0302a4..770fd5756f7 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameIndexerPositionTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformIndexerPositionTests.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.core.dataframe.transforms; +package org.elasticsearch.xpack.core.transform.transforms; import org.elasticsearch.common.io.stream.Writeable.Reader; import org.elasticsearch.common.xcontent.XContentParser; @@ -15,20 +15,20 @@ import java.util.HashMap; import java.util.Map; import java.util.function.Predicate; -public class DataFrameIndexerPositionTests extends AbstractSerializingTestCase { +public class TransformIndexerPositionTests extends AbstractSerializingTestCase { - public static DataFrameIndexerPosition randomDataFrameIndexerPosition() { - return new DataFrameIndexerPosition(randomPosition(), randomPosition()); + public static TransformIndexerPosition randomTransformIndexerPosition() { + return new TransformIndexerPosition(randomPosition(), randomPosition()); } @Override - protected DataFrameIndexerPosition createTestInstance() { - return randomDataFrameIndexerPosition(); + protected TransformIndexerPosition createTestInstance() { + return randomTransformIndexerPosition(); } @Override - protected Reader instanceReader() { - return DataFrameIndexerPosition::new; + protected Reader instanceReader() { + return TransformIndexerPosition::new; } @Override @@ -42,8 +42,8 @@ public class DataFrameIndexerPositionTests extends AbstractSerializingTestCase randomPosition() { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameIndexerTransformStatsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformIndexerStatsTests.java similarity index 71% rename from x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameIndexerTransformStatsTests.java rename to x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformIndexerStatsTests.java index 
3313ae8d145..05866c9c0b1 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameIndexerTransformStatsTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformIndexerStatsTests.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.core.dataframe.transforms; +package org.elasticsearch.xpack.core.transform.transforms; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.XContentParser; @@ -13,25 +13,25 @@ import org.elasticsearch.test.AbstractSerializingTestCase; import static org.hamcrest.Matchers.closeTo; import static org.hamcrest.Matchers.equalTo; -public class DataFrameIndexerTransformStatsTests extends AbstractSerializingTestCase { +public class TransformIndexerStatsTests extends AbstractSerializingTestCase { @Override - protected DataFrameIndexerTransformStats createTestInstance() { + protected TransformIndexerStats createTestInstance() { return randomStats(); } @Override - protected Writeable.Reader instanceReader() { - return DataFrameIndexerTransformStats::new; + protected Writeable.Reader instanceReader() { + return TransformIndexerStats::new; } @Override - protected DataFrameIndexerTransformStats doParseInstance(XContentParser parser) { - return DataFrameIndexerTransformStats.fromXContent(parser); + protected TransformIndexerStats doParseInstance(XContentParser parser) { + return TransformIndexerStats.fromXContent(parser); } - public static DataFrameIndexerTransformStats randomStats() { - return new DataFrameIndexerTransformStats(randomLongBetween(10L, 10000L), + public static TransformIndexerStats randomStats() { + return new TransformIndexerStats(randomLongBetween(10L, 10000L), randomLongBetween(0L, 10000L), randomLongBetween(0L, 10000L), randomLongBetween(0L, 10000L), randomLongBetween(0L, 10000L), randomLongBetween(0L, 10000L), randomLongBetween(0L, 10000L), randomLongBetween(0L, 10000L), randomLongBetween(0L, 10000L), randomLongBetween(0L, 10000L), @@ -41,7 +41,7 @@ public class DataFrameIndexerTransformStatsTests extends AbstractSerializingTest } public void testExpAvgIncrement() { - DataFrameIndexerTransformStats stats = new DataFrameIndexerTransformStats(); + TransformIndexerStats stats = new TransformIndexerStats(); assertThat(stats.getExpAvgCheckpointDurationMs(), equalTo(0.0)); assertThat(stats.getExpAvgDocumentsIndexed(), equalTo(0.0)); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformProgressTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformProgressTests.java similarity index 61% rename from x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformProgressTests.java rename to x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformProgressTests.java index ccd61a6dd44..f8b79c6e994 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformProgressTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformProgressTests.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.core.dataframe.transforms; +package org.elasticsearch.xpack.core.transform.transforms; import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.BytesStreamOutput; @@ -19,59 +19,59 @@ import static org.hamcrest.Matchers.closeTo; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.nullValue; -public class DataFrameTransformProgressTests extends AbstractSerializingDataFrameTestCase { +public class TransformProgressTests extends AbstractSerializingTransformTestCase { - public static DataFrameTransformProgress randomDataFrameTransformProgress() { - return new DataFrameTransformProgress( + public static TransformProgress randomTransformProgress() { + return new TransformProgress( randomBoolean() ? null : randomLongBetween(0, 10000), randomBoolean() ? null : randomLongBetween(0, 10000), randomBoolean() ? null : randomLongBetween(1, 10000)); } @Override - protected DataFrameTransformProgress doParseInstance(XContentParser parser) throws IOException { - return DataFrameTransformProgress.PARSER.apply(parser, null); + protected TransformProgress doParseInstance(XContentParser parser) throws IOException { + return TransformProgress.PARSER.apply(parser, null); } @Override - protected DataFrameTransformProgress createTestInstance() { - return randomDataFrameTransformProgress(); + protected TransformProgress createTestInstance() { + return randomTransformProgress(); } @Override - protected Reader instanceReader() { - return DataFrameTransformProgress::new; + protected Reader instanceReader() { + return TransformProgress::new; } public void testPercentComplete() { - DataFrameTransformProgress progress = new DataFrameTransformProgress(0L, 100L, null); + TransformProgress progress = new TransformProgress(0L, 100L, null); assertThat(progress.getPercentComplete(), equalTo(100.0)); - progress = new DataFrameTransformProgress(100L, 0L, null); + progress = new TransformProgress(100L, 0L, null); assertThat(progress.getPercentComplete(), equalTo(0.0)); - progress = new DataFrameTransformProgress(100L, 10000L, null); + progress = new TransformProgress(100L, 10000L, null); assertThat(progress.getPercentComplete(), equalTo(100.0)); - progress = new DataFrameTransformProgress(100L, null, null); + progress = new TransformProgress(100L, null, null); assertThat(progress.getPercentComplete(), equalTo(0.0)); - progress = new DataFrameTransformProgress(100L, 50L, null); + progress = new TransformProgress(100L, 50L, null); assertThat(progress.getPercentComplete(), closeTo(50.0, 0.000001)); - progress = new DataFrameTransformProgress(null, 50L, 10L); + progress = new TransformProgress(null, 50L, 10L); assertThat(progress.getPercentComplete(), is(nullValue())); } public void testConstructor() { IllegalArgumentException ex = - expectThrows(IllegalArgumentException.class, () -> new DataFrameTransformProgress(-1L, null, null)); + expectThrows(IllegalArgumentException.class, () -> new TransformProgress(-1L, null, null)); assertThat(ex.getMessage(), equalTo("[total_docs] must be >0.")); - ex = expectThrows(IllegalArgumentException.class, () -> new DataFrameTransformProgress(1L, -1L, null)); + ex = expectThrows(IllegalArgumentException.class, () -> new TransformProgress(1L, -1L, null)); assertThat(ex.getMessage(), equalTo("[docs_processed] must be >0.")); - ex = expectThrows(IllegalArgumentException.class, () -> new DataFrameTransformProgress(1L, 1L, -1L)); + ex = expectThrows(IllegalArgumentException.class, () -> new TransformProgress(1L, 1L, -1L)); 
assertThat(ex.getMessage(), equalTo("[docs_indexed] must be >0.")); } @@ -79,25 +79,25 @@ public class DataFrameTransformProgressTests extends AbstractSerializingDataFram long totalDocs = 10_000; long processedDocs = randomLongBetween(0, totalDocs); // documentsIndexed are not in past versions, so it would be zero coming in - DataFrameTransformProgress progress = new DataFrameTransformProgress(totalDocs, processedDocs, 0L); + TransformProgress progress = new TransformProgress(totalDocs, processedDocs, 0L); try (BytesStreamOutput output = new BytesStreamOutput()) { output.setVersion(Version.V_7_2_0); progress.writeTo(output); try (StreamInput in = output.bytes().streamInput()) { in.setVersion(Version.V_7_2_0); - DataFrameTransformProgress streamedProgress = new DataFrameTransformProgress(in); + TransformProgress streamedProgress = new TransformProgress(in); assertEquals(progress, streamedProgress); } } - progress = new DataFrameTransformProgress(null, processedDocs, 0L); + progress = new TransformProgress(null, processedDocs, 0L); try (BytesStreamOutput output = new BytesStreamOutput()) { output.setVersion(Version.V_7_2_0); progress.writeTo(output); try (StreamInput in = output.bytes().streamInput()) { in.setVersion(Version.V_7_2_0); - DataFrameTransformProgress streamedProgress = new DataFrameTransformProgress(in); - assertEquals(new DataFrameTransformProgress(0L, 0L, 0L), streamedProgress); + TransformProgress streamedProgress = new TransformProgress(in); + assertEquals(new TransformProgress(0L, 0L, 0L), streamedProgress); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformStateTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformStateTests.java similarity index 53% rename from x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformStateTests.java rename to x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformStateTests.java index cc6fe88e5b2..3bd90944894 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformStateTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformStateTests.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.core.dataframe.transforms; +package org.elasticsearch.xpack.core.transform.transforms; import org.elasticsearch.common.io.stream.Writeable.Reader; import org.elasticsearch.common.xcontent.XContentParser; @@ -14,34 +14,34 @@ import org.elasticsearch.xpack.core.indexing.IndexerState; import java.io.IOException; import java.util.function.Predicate; -import static org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformProgressTests.randomDataFrameTransformProgress; -import static org.elasticsearch.xpack.core.dataframe.transforms.NodeAttributeTests.randomNodeAttributes; +import static org.elasticsearch.xpack.core.transform.transforms.TransformProgressTests.randomTransformProgress; +import static org.elasticsearch.xpack.core.transform.transforms.NodeAttributeTests.randomNodeAttributes; -public class DataFrameTransformStateTests extends AbstractSerializingTestCase { +public class TransformStateTests extends AbstractSerializingTestCase { - public static DataFrameTransformState randomDataFrameTransformState() { - return new DataFrameTransformState(randomFrom(DataFrameTransformTaskState.values()), + public static TransformState randomDataFrameTransformState() { + return new TransformState(randomFrom(TransformTaskState.values()), randomFrom(IndexerState.values()), - DataFrameIndexerPositionTests.randomDataFrameIndexerPosition(), + TransformIndexerPositionTests.randomTransformIndexerPosition(), randomLongBetween(0,10), randomBoolean() ? null : randomAlphaOfLength(10), - randomBoolean() ? null : randomDataFrameTransformProgress(), + randomBoolean() ? null : randomTransformProgress(), randomBoolean() ? null : randomNodeAttributes()); } @Override - protected DataFrameTransformState doParseInstance(XContentParser parser) throws IOException { - return DataFrameTransformState.fromXContent(parser); + protected TransformState doParseInstance(XContentParser parser) throws IOException { + return TransformState.fromXContent(parser); } @Override - protected DataFrameTransformState createTestInstance() { + protected TransformState createTestInstance() { return randomDataFrameTransformState(); } @Override - protected Reader instanceReader() { - return DataFrameTransformState::new; + protected Reader instanceReader() { + return TransformState::new; } @Override diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformStatsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformStatsTests.java similarity index 55% rename from x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformStatsTests.java rename to x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformStatsTests.java index f438d6cfcf6..093e05f4f90 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformStatsTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformStatsTests.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.core.dataframe.transforms; +package org.elasticsearch.xpack.core.transform.transforms; import org.elasticsearch.common.io.stream.Writeable.Reader; import org.elasticsearch.common.xcontent.XContentParser; @@ -13,30 +13,30 @@ import org.elasticsearch.test.AbstractSerializingTestCase; import java.io.IOException; import java.util.function.Predicate; -public class DataFrameTransformStatsTests extends AbstractSerializingTestCase<DataFrameTransformStats> { +public class TransformStatsTests extends AbstractSerializingTestCase<TransformStats> { - public static DataFrameTransformStats randomDataFrameTransformStats() { - return new DataFrameTransformStats(randomAlphaOfLength(10), - randomFrom(DataFrameTransformStats.State.values()), + public static TransformStats randomDataFrameTransformStats() { + return new TransformStats(randomAlphaOfLength(10), + randomFrom(TransformStats.State.values()), randomBoolean() ? null : randomAlphaOfLength(100), randomBoolean() ? null : NodeAttributeTests.randomNodeAttributes(), - DataFrameIndexerTransformStatsTests.randomStats(), - DataFrameTransformCheckpointingInfoTests.randomDataFrameTransformCheckpointingInfo()); + TransformIndexerStatsTests.randomStats(), + TransformCheckpointingInfoTests.randomDataFrameTransformCheckpointingInfo()); } @Override - protected DataFrameTransformStats doParseInstance(XContentParser parser) throws IOException { - return DataFrameTransformStats.fromXContent(parser); + protected TransformStats doParseInstance(XContentParser parser) throws IOException { + return TransformStats.fromXContent(parser); } @Override - protected DataFrameTransformStats createTestInstance() { + protected TransformStats createTestInstance() { return randomDataFrameTransformStats(); } @Override - protected Reader<DataFrameTransformStats> instanceReader() { - return DataFrameTransformStats::new; + protected Reader<TransformStats> instanceReader() { + return TransformStats::new; } @Override diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformStoredDocTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformStoredDocTests.java new file mode 100644 index 00000000000..0f20fd0462f --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformStoredDocTests.java @@ -0,0 +1,53 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
+ */ + +package org.elasticsearch.xpack.core.transform.transforms; + +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.transform.TransformField; + +import java.io.IOException; +import java.util.Collections; + +public class TransformStoredDocTests extends AbstractSerializingTransformTestCase { + + protected static ToXContent.Params TO_XCONTENT_PARAMS = new ToXContent.MapParams( + Collections.singletonMap(TransformField.FOR_INTERNAL_STORAGE, "true")); + + public static TransformStoredDoc randomTransformStoredDoc(String id) { + return new TransformStoredDoc(id, + TransformStateTests.randomDataFrameTransformState(), + TransformIndexerStatsTests.randomStats()); + } + + public static TransformStoredDoc randomDataFrameTransformStoredDoc() { + return randomTransformStoredDoc(randomAlphaOfLengthBetween(1, 10)); + } + + @Override + protected TransformStoredDoc doParseInstance(XContentParser parser) throws IOException { + return TransformStoredDoc.PARSER.apply(parser, null); + } + + @Override + // Setting params for internal storage so that we can check XContent equivalence as + // DataFrameIndexerTransformStats does not write the ID to the XContentObject unless it is for internal storage + protected ToXContent.Params getToXContentParams() { + return TO_XCONTENT_PARAMS; + } + + @Override + protected TransformStoredDoc createTestInstance() { + return randomDataFrameTransformStoredDoc(); + } + + @Override + protected Reader instanceReader() { + return TransformStoredDoc::new; + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformTaskStateTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformTaskStateTests.java similarity index 69% rename from x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformTaskStateTests.java rename to x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformTaskStateTests.java index 62c73846f59..7f52f347faf 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformTaskStateTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformTaskStateTests.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.core.dataframe.transforms; +package org.elasticsearch.xpack.core.transform.transforms; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; @@ -14,31 +14,31 @@ import java.io.IOException; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; -public class DataFrameTransformTaskStateTests extends ESTestCase { +public class TransformTaskStateTests extends ESTestCase { public void testValidOrdinals() { - assertThat(DataFrameTransformTaskState.STOPPED.ordinal(), equalTo(0)); - assertThat(DataFrameTransformTaskState.STARTED.ordinal(), equalTo(1)); - assertThat(DataFrameTransformTaskState.FAILED.ordinal(), equalTo(2)); + assertThat(TransformTaskState.STOPPED.ordinal(), equalTo(0)); + assertThat(TransformTaskState.STARTED.ordinal(), equalTo(1)); + assertThat(TransformTaskState.FAILED.ordinal(), equalTo(2)); } public void testwriteTo() throws Exception { try (BytesStreamOutput out = new BytesStreamOutput()) { - DataFrameTransformTaskState.STOPPED.writeTo(out); + TransformTaskState.STOPPED.writeTo(out); try (StreamInput in = out.bytes().streamInput()) { assertThat(in.readVInt(), equalTo(0)); } } try (BytesStreamOutput out = new BytesStreamOutput()) { - DataFrameTransformTaskState.STARTED.writeTo(out); + TransformTaskState.STARTED.writeTo(out); try (StreamInput in = out.bytes().streamInput()) { assertThat(in.readVInt(), equalTo(1)); } } try (BytesStreamOutput out = new BytesStreamOutput()) { - DataFrameTransformTaskState.FAILED.writeTo(out); + TransformTaskState.FAILED.writeTo(out); try (StreamInput in = out.bytes().streamInput()) { assertThat(in.readVInt(), equalTo(2)); } @@ -49,19 +49,19 @@ public class DataFrameTransformTaskStateTests extends ESTestCase { try (BytesStreamOutput out = new BytesStreamOutput()) { out.writeVInt(0); try (StreamInput in = out.bytes().streamInput()) { - assertThat(DataFrameTransformTaskState.fromStream(in), equalTo(DataFrameTransformTaskState.STOPPED)); + assertThat(TransformTaskState.fromStream(in), equalTo(TransformTaskState.STOPPED)); } } try (BytesStreamOutput out = new BytesStreamOutput()) { out.writeVInt(1); try (StreamInput in = out.bytes().streamInput()) { - assertThat(DataFrameTransformTaskState.fromStream(in), equalTo(DataFrameTransformTaskState.STARTED)); + assertThat(TransformTaskState.fromStream(in), equalTo(TransformTaskState.STARTED)); } } try (BytesStreamOutput out = new BytesStreamOutput()) { out.writeVInt(2); try (StreamInput in = out.bytes().streamInput()) { - assertThat(DataFrameTransformTaskState.fromStream(in), equalTo(DataFrameTransformTaskState.FAILED)); + assertThat(TransformTaskState.fromStream(in), equalTo(TransformTaskState.FAILED)); } } } @@ -70,10 +70,10 @@ public class DataFrameTransformTaskStateTests extends ESTestCase { try (BytesStreamOutput out = new BytesStreamOutput()) { out.writeVInt(randomIntBetween(3, Integer.MAX_VALUE)); try (StreamInput in = out.bytes().streamInput()) { - DataFrameTransformTaskState.fromStream(in); + TransformTaskState.fromStream(in); fail("Expected IOException"); } catch(IOException e) { - assertThat(e.getMessage(), containsString("Unknown DataFrameTransformTaskState ordinal [")); + assertThat(e.getMessage(), containsString("Unknown TransformTaskState ordinal [")); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformTests.java 
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformTests.java similarity index 67% rename from x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformTests.java rename to x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformTests.java index ce830240c63..3f0ecdc04f1 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformTests.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.core.dataframe.transforms; +package org.elasticsearch.xpack.core.transform.transforms; import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.BytesStreamOutput; @@ -17,34 +17,34 @@ import java.io.IOException; import static org.hamcrest.Matchers.equalTo; -public class DataFrameTransformTests extends AbstractSerializingDataFrameTestCase { +public class TransformTests extends AbstractSerializingTransformTestCase { @Override - protected DataFrameTransform doParseInstance(XContentParser parser) throws IOException { - return DataFrameTransform.PARSER.apply(parser, null); + protected TransformTaskParams doParseInstance(XContentParser parser) throws IOException { + return TransformTaskParams.PARSER.apply(parser, null); } @Override - protected DataFrameTransform createTestInstance() { - return new DataFrameTransform(randomAlphaOfLength(10), randomBoolean() ? null : Version.CURRENT, + protected TransformTaskParams createTestInstance() { + return new TransformTaskParams(randomAlphaOfLength(10), randomBoolean() ? null : Version.CURRENT, randomBoolean() ? 
null : TimeValue.timeValueMillis(randomIntBetween(1_000, 3_600_000))); } @Override - protected Reader<DataFrameTransform> instanceReader() { - return DataFrameTransform::new; + protected Reader<TransformTaskParams> instanceReader() { + return TransformTaskParams::new; } public void testBackwardsSerialization() throws IOException { for (int i = 0; i < NUMBER_OF_TEST_RUNS; i++) { - DataFrameTransform transformTask = createTestInstance(); + TransformTaskParams transformTask = createTestInstance(); try (BytesStreamOutput output = new BytesStreamOutput()) { output.setVersion(Version.V_7_2_0); transformTask.writeTo(output); try (StreamInput in = output.bytes().streamInput()) { in.setVersion(Version.V_7_2_0); // Since the old version does not have the version serialized, the version NOW is 7.2.0 - DataFrameTransform streamedTask = new DataFrameTransform(in); + TransformTaskParams streamedTask = new TransformTaskParams(in); assertThat(streamedTask.getVersion(), equalTo(Version.V_7_2_0)); assertThat(streamedTask.getId(), equalTo(transformTask.getId())); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/AggregationConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/pivot/AggregationConfigTests.java similarity index 94% rename from x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/AggregationConfigTests.java rename to x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/pivot/AggregationConfigTests.java index c4b69c72ac6..b9e7f7abf5a 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/AggregationConfigTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/pivot/AggregationConfigTests.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License.
*/ -package org.elasticsearch.xpack.core.dataframe.transforms.pivot; +package org.elasticsearch.xpack.core.transform.transforms.pivot; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.Writeable.Reader; @@ -19,8 +19,8 @@ import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.AggregatorFactories; -import org.elasticsearch.xpack.core.dataframe.transforms.AbstractSerializingDataFrameTestCase; -import org.elasticsearch.xpack.core.dataframe.transforms.MockDeprecatedAggregationBuilder; +import org.elasticsearch.xpack.core.transform.transforms.AbstractSerializingTransformTestCase; +import org.elasticsearch.xpack.core.transform.transforms.MockDeprecatedAggregationBuilder; import org.junit.Before; import java.io.IOException; @@ -29,7 +29,7 @@ import java.util.LinkedHashMap; import java.util.Map; import java.util.Set; -public class AggregationConfigTests extends AbstractSerializingDataFrameTestCase { +public class AggregationConfigTests extends AbstractSerializingTransformTestCase { private boolean lenient; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/DateHistogramGroupSourceTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/pivot/DateHistogramGroupSourceTests.java similarity index 97% rename from x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/DateHistogramGroupSourceTests.java rename to x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/pivot/DateHistogramGroupSourceTests.java index b28cf603030..0628ae8ae7e 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/DateHistogramGroupSourceTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/pivot/DateHistogramGroupSourceTests.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.core.dataframe.transforms.pivot; +package org.elasticsearch.xpack.core.transform.transforms.pivot; import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.BytesStreamOutput; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/GroupConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/pivot/GroupConfigTests.java similarity index 97% rename from x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/GroupConfigTests.java rename to x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/pivot/GroupConfigTests.java index 11dfc55264a..6f999878bc7 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/GroupConfigTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/pivot/GroupConfigTests.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.core.dataframe.transforms.pivot; +package org.elasticsearch.xpack.core.transform.transforms.pivot; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.bytes.BytesReference; @@ -17,7 +17,7 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.test.AbstractSerializingTestCase; -import org.elasticsearch.xpack.core.dataframe.transforms.pivot.SingleGroupSource.Type; +import org.elasticsearch.xpack.core.transform.transforms.pivot.SingleGroupSource.Type; import java.io.IOException; import java.util.Collections; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/HistogramGroupSourceTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/pivot/HistogramGroupSourceTests.java similarity index 95% rename from x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/HistogramGroupSourceTests.java rename to x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/pivot/HistogramGroupSourceTests.java index 7035376c749..fd0baf3a5d7 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/HistogramGroupSourceTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/pivot/HistogramGroupSourceTests.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.core.dataframe.transforms.pivot; +package org.elasticsearch.xpack.core.transform.transforms.pivot; import org.elasticsearch.common.io.stream.Writeable.Reader; import org.elasticsearch.common.xcontent.XContentParser; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/PivotConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/pivot/PivotConfigTests.java similarity index 96% rename from x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/PivotConfigTests.java rename to x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/pivot/PivotConfigTests.java index 2f93f50d4d1..30c310fe3e4 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/PivotConfigTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/pivot/PivotConfigTests.java @@ -4,14 +4,14 @@ * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.core.dataframe.transforms.pivot; +package org.elasticsearch.xpack.core.transform.transforms.pivot; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.Writeable.Reader; import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.xpack.core.dataframe.transforms.AbstractSerializingDataFrameTestCase; +import org.elasticsearch.xpack.core.transform.transforms.AbstractSerializingTransformTestCase; import java.io.IOException; import java.util.Arrays; @@ -21,7 +21,7 @@ import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.empty; -public class PivotConfigTests extends AbstractSerializingDataFrameTestCase { +public class PivotConfigTests extends AbstractSerializingTransformTestCase { public static PivotConfig randomPivotConfig() { return new PivotConfig(GroupConfigTests.randomGroupConfig(), diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/TermsGroupSourceTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/pivot/TermsGroupSourceTests.java similarity index 94% rename from x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/TermsGroupSourceTests.java rename to x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/pivot/TermsGroupSourceTests.java index d41d68e4acc..60c973b3039 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/transforms/pivot/TermsGroupSourceTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/pivot/TermsGroupSourceTests.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.core.dataframe.transforms.pivot; +package org.elasticsearch.xpack.core.transform.transforms.pivot; import org.elasticsearch.common.io.stream.Writeable.Reader; import org.elasticsearch.common.xcontent.XContentParser; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/utils/DataFrameStringsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/utils/TransformStringsTests.java similarity index 56% rename from x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/utils/DataFrameStringsTests.java rename to x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/utils/TransformStringsTests.java index 2e3f3f13124..48c323fcbeb 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/utils/DataFrameStringsTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/utils/TransformStringsTests.java @@ -4,34 +4,34 @@ * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.core.dataframe.utils; +package org.elasticsearch.xpack.core.transform.utils; import org.elasticsearch.test.ESTestCase; -public class DataFrameStringsTests extends ESTestCase { +public class TransformStringsTests extends ESTestCase { public void testValidId() { - assertTrue(DataFrameStrings.isValidId("valid-_id")); + assertTrue(TransformStrings.isValidId("valid-_id")); } public void testValidId_givenUppercase() { - assertFalse(DataFrameStrings.isValidId("MiXedCase")); + assertFalse(TransformStrings.isValidId("MiXedCase")); } public void testValidId_givenStartsWithUnderScore() { - assertFalse(DataFrameStrings.isValidId("_this_bit_is_ok")); + assertFalse(TransformStrings.isValidId("_this_bit_is_ok")); } public void testKasValidLengthForId_givenTooLong() { StringBuilder sb = new StringBuilder(); - for (int i=0; i { + @Override + protected SnapshotLifecycleStats doParseInstance(XContentParser parser) throws IOException { + return SnapshotLifecycleStats.parse(parser); + } + + public static SnapshotLifecycleStats.SnapshotPolicyStats randomPolicyStats(String policyId) { + return new SnapshotLifecycleStats.SnapshotPolicyStats(policyId, + randomBoolean() ? 0 : randomNonNegativeLong(), + randomBoolean() ? 0 : randomNonNegativeLong(), + randomBoolean() ? 0 : randomNonNegativeLong(), + randomBoolean() ? 0 : randomNonNegativeLong()); + } + + public static SnapshotLifecycleStats randomLifecycleStats() { + int policies = randomIntBetween(0, 5); + Map policyStats = new HashMap<>(policies); + for (int i = 0; i < policies; i++) { + String policy = "policy-" + randomAlphaOfLength(4); + policyStats.put(policy, randomPolicyStats(policy)); + } + return new SnapshotLifecycleStats( + randomBoolean() ? 0 : randomNonNegativeLong(), + randomBoolean() ? 0 : randomNonNegativeLong(), + randomBoolean() ? 0 : randomNonNegativeLong(), + randomBoolean() ? 0 : randomNonNegativeLong(), + policyStats); + } + + @Override + protected SnapshotLifecycleStats createTestInstance() { + return randomLifecycleStats(); + } + + @Override + protected SnapshotLifecycleStats mutateInstance(SnapshotLifecycleStats instance) throws IOException { + return randomValueOtherThan(instance, () -> instance.merge(createTestInstance())); + } + + @Override + protected Writeable.Reader instanceReader() { + return SnapshotLifecycleStats::new; + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/slm/SnapshotRetentionConfigurationTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/slm/SnapshotRetentionConfigurationTests.java new file mode 100644 index 00000000000..378fe0c2d77 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/slm/SnapshotRetentionConfigurationTests.java @@ -0,0 +1,109 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.slm; + +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.snapshots.SnapshotId; +import org.elasticsearch.snapshots.SnapshotInfo; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.slm.SnapshotLifecyclePolicy; +import org.elasticsearch.xpack.core.slm.SnapshotRetentionConfiguration; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; + +public class SnapshotRetentionConfigurationTests extends ESTestCase { + + private static final String REPO = "repo"; + + public void testConflictingSettings() { + IllegalArgumentException e; + e = expectThrows(IllegalArgumentException.class, () -> new SnapshotRetentionConfiguration(null, 0, null)); + assertThat(e.getMessage(), containsString("minimum snapshot count must be at least 1, but was: 0")); + e = expectThrows(IllegalArgumentException.class, () -> new SnapshotRetentionConfiguration(null, -2, null)); + assertThat(e.getMessage(), containsString("minimum snapshot count must be at least 1, but was: -2")); + e = expectThrows(IllegalArgumentException.class, () -> new SnapshotRetentionConfiguration(null, null, 0)); + assertThat(e.getMessage(), containsString("maximum snapshot count must be at least 1, but was: 0")); + e = expectThrows(IllegalArgumentException.class, () -> new SnapshotRetentionConfiguration(null, null, -2)); + assertThat(e.getMessage(), containsString("maximum snapshot count must be at least 1, but was: -2")); + e = expectThrows(IllegalArgumentException.class, () -> new SnapshotRetentionConfiguration(null, 3, 1)); + assertThat(e.getMessage(), containsString("minimum snapshot count 3 cannot be larger than maximum snapshot count 1")); + } + + public void testExpireAfter() { + SnapshotRetentionConfiguration conf = new SnapshotRetentionConfiguration( + () -> TimeValue.timeValueDays(1).millis() + 1, + TimeValue.timeValueDays(1), null, null); + SnapshotInfo oldInfo = makeInfo(0); + assertThat(conf.getSnapshotDeletionPredicate(Collections.singletonList(oldInfo)).test(oldInfo), equalTo(true)); + + SnapshotInfo newInfo = makeInfo(1); + assertThat(conf.getSnapshotDeletionPredicate(Collections.singletonList(newInfo)).test(newInfo), equalTo(false)); + + List infos = new ArrayList<>(); + infos.add(newInfo); + infos.add(oldInfo); + assertThat(conf.getSnapshotDeletionPredicate(infos).test(newInfo), equalTo(false)); + assertThat(conf.getSnapshotDeletionPredicate(infos).test(oldInfo), equalTo(true)); + } + + public void testExpiredWithMinimum() { + SnapshotRetentionConfiguration conf = new SnapshotRetentionConfiguration(() -> TimeValue.timeValueDays(1).millis() + 1, + TimeValue.timeValueDays(1), 2, null); + SnapshotInfo oldInfo = makeInfo(0); + SnapshotInfo newInfo = makeInfo(1); + + List infos = new ArrayList<>(); + infos.add(newInfo); + infos.add(oldInfo); + assertThat(conf.getSnapshotDeletionPredicate(infos).test(newInfo), equalTo(false)); + assertThat(conf.getSnapshotDeletionPredicate(infos).test(oldInfo), equalTo(false)); + + conf = new SnapshotRetentionConfiguration(() -> TimeValue.timeValueDays(1).millis() + 1, + TimeValue.timeValueDays(1), 1, null); + assertThat(conf.getSnapshotDeletionPredicate(infos).test(newInfo), equalTo(false)); + assertThat(conf.getSnapshotDeletionPredicate(infos).test(oldInfo), equalTo(true)); + } + + public void testMaximum() { 
+ SnapshotRetentionConfiguration conf = new SnapshotRetentionConfiguration(() -> 1, null, 2, 5); + SnapshotInfo s1 = makeInfo(1); + SnapshotInfo s2 = makeInfo(2); + SnapshotInfo s3 = makeInfo(3); + SnapshotInfo s4 = makeInfo(4); + SnapshotInfo s5 = makeInfo(5); + SnapshotInfo s6 = makeInfo(6); + SnapshotInfo s7 = makeInfo(7); + SnapshotInfo s8 = makeInfo(8); + SnapshotInfo s9 = makeInfo(9); + + List infos = Arrays.asList(s1 , s2, s3, s4, s5, s6, s7, s8, s9); + assertThat(conf.getSnapshotDeletionPredicate(infos).test(s1), equalTo(true)); + assertThat(conf.getSnapshotDeletionPredicate(infos).test(s2), equalTo(true)); + assertThat(conf.getSnapshotDeletionPredicate(infos).test(s3), equalTo(true)); + assertThat(conf.getSnapshotDeletionPredicate(infos).test(s4), equalTo(true)); + assertThat(conf.getSnapshotDeletionPredicate(infos).test(s5), equalTo(false)); + assertThat(conf.getSnapshotDeletionPredicate(infos).test(s6), equalTo(false)); + assertThat(conf.getSnapshotDeletionPredicate(infos).test(s7), equalTo(false)); + assertThat(conf.getSnapshotDeletionPredicate(infos).test(s8), equalTo(false)); + assertThat(conf.getSnapshotDeletionPredicate(infos).test(s9), equalTo(false)); + } + + private SnapshotInfo makeInfo(long startTime) { + final Map meta = new HashMap<>(); + meta.put(SnapshotLifecyclePolicy.POLICY_ID_METADATA_FIELD, REPO); + return new SnapshotInfo(new SnapshotId("snap-" + randomAlphaOfLength(3), "uuid"), + Collections.singletonList("foo"), startTime, false, meta); + } +} diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/DataFrame.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/DataFrame.java deleted file mode 100644 index 85e03744a05..00000000000 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/DataFrame.java +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -package org.elasticsearch.xpack.dataframe; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.apache.lucene.util.SetOnce; -import org.elasticsearch.action.ActionRequest; -import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.client.Client; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.metadata.IndexTemplateMetaData; -import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Module; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.settings.ClusterSettings; -import org.elasticsearch.common.settings.IndexScopedSettings; -import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.SettingsFilter; -import org.elasticsearch.common.settings.SettingsModule; -import org.elasticsearch.common.xcontent.NamedXContentRegistry; -import org.elasticsearch.common.xcontent.NamedXContentRegistry.Entry; -import org.elasticsearch.env.Environment; -import org.elasticsearch.env.NodeEnvironment; -import org.elasticsearch.license.XPackLicenseState; -import org.elasticsearch.persistent.PersistentTasksExecutor; -import org.elasticsearch.plugins.ActionPlugin; -import org.elasticsearch.plugins.PersistentTaskPlugin; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.rest.RestController; -import org.elasticsearch.rest.RestHandler; -import org.elasticsearch.script.ScriptService; -import org.elasticsearch.threadpool.ExecutorBuilder; -import org.elasticsearch.threadpool.FixedExecutorBuilder; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.watcher.ResourceWatcherService; -import org.elasticsearch.xpack.core.XPackPlugin; -import org.elasticsearch.xpack.core.XPackSettings; -import org.elasticsearch.xpack.core.dataframe.DataFrameNamedXContentProvider; -import org.elasticsearch.xpack.core.dataframe.action.DeleteDataFrameTransformAction; -import org.elasticsearch.xpack.core.dataframe.action.GetDataFrameTransformsAction; -import org.elasticsearch.xpack.core.dataframe.action.GetDataFrameTransformsStatsAction; -import org.elasticsearch.xpack.core.dataframe.action.PreviewDataFrameTransformAction; -import org.elasticsearch.xpack.core.dataframe.action.PutDataFrameTransformAction; -import org.elasticsearch.xpack.core.dataframe.action.StartDataFrameTransformAction; -import org.elasticsearch.xpack.core.dataframe.action.StartDataFrameTransformTaskAction; -import org.elasticsearch.xpack.core.dataframe.action.StopDataFrameTransformAction; -import org.elasticsearch.xpack.core.dataframe.action.UpdateDataFrameTransformAction; -import org.elasticsearch.xpack.core.scheduler.SchedulerEngine; -import org.elasticsearch.xpack.dataframe.action.TransportDeleteDataFrameTransformAction; -import org.elasticsearch.xpack.dataframe.action.TransportGetDataFrameTransformsAction; -import org.elasticsearch.xpack.dataframe.action.TransportGetDataFrameTransformsStatsAction; -import org.elasticsearch.xpack.dataframe.action.TransportPreviewDataFrameTransformAction; -import org.elasticsearch.xpack.dataframe.action.TransportPutDataFrameTransformAction; -import org.elasticsearch.xpack.dataframe.action.TransportStartDataFrameTransformAction; -import org.elasticsearch.xpack.dataframe.action.TransportStartDataFrameTransformTaskAction; -import 
org.elasticsearch.xpack.dataframe.action.TransportStopDataFrameTransformAction; -import org.elasticsearch.xpack.dataframe.action.TransportUpdateDataFrameTransformAction; -import org.elasticsearch.xpack.dataframe.checkpoint.DataFrameTransformsCheckpointService; -import org.elasticsearch.xpack.dataframe.notifications.DataFrameAuditor; -import org.elasticsearch.xpack.dataframe.persistence.DataFrameInternalIndex; -import org.elasticsearch.xpack.dataframe.persistence.DataFrameTransformsConfigManager; -import org.elasticsearch.xpack.dataframe.rest.action.RestDeleteDataFrameTransformAction; -import org.elasticsearch.xpack.dataframe.rest.action.RestGetDataFrameTransformsAction; -import org.elasticsearch.xpack.dataframe.rest.action.RestGetDataFrameTransformsStatsAction; -import org.elasticsearch.xpack.dataframe.rest.action.RestPreviewDataFrameTransformAction; -import org.elasticsearch.xpack.dataframe.rest.action.RestPutDataFrameTransformAction; -import org.elasticsearch.xpack.dataframe.rest.action.RestStartDataFrameTransformAction; -import org.elasticsearch.xpack.dataframe.rest.action.RestStopDataFrameTransformAction; -import org.elasticsearch.xpack.dataframe.rest.action.RestUpdateDataFrameTransformAction; -import org.elasticsearch.xpack.dataframe.transforms.DataFrameTransformPersistentTasksExecutor; -import org.elasticsearch.xpack.dataframe.transforms.DataFrameTransformTask; - -import java.io.IOException; -import java.time.Clock; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.function.Supplier; -import java.util.function.UnaryOperator; - -import static java.util.Collections.emptyList; - -public class DataFrame extends Plugin implements ActionPlugin, PersistentTaskPlugin { - - public static final String NAME = "data_frame"; - public static final String TASK_THREAD_POOL_NAME = "data_frame_indexing"; - - private static final Logger logger = LogManager.getLogger(DataFrame.class); - - private final boolean enabled; - private final Settings settings; - private final boolean transportClientMode; - private final SetOnce dataFrameTransformsConfigManager = new SetOnce<>(); - private final SetOnce dataFrameAuditor = new SetOnce<>(); - private final SetOnce dataFrameTransformsCheckpointService = new SetOnce<>(); - private final SetOnce schedulerEngine = new SetOnce<>(); - - public DataFrame(Settings settings) { - this.settings = settings; - - this.enabled = XPackSettings.DATA_FRAME_ENABLED.get(settings); - this.transportClientMode = XPackPlugin.transportClientMode(settings); - } - - @Override - public Collection createGuiceModules() { - List modules = new ArrayList<>(); - - if (transportClientMode) { - return modules; - } - - modules.add(b -> XPackPlugin.bindFeatureSet(b, DataFrameFeatureSet.class)); - return modules; - } - - protected XPackLicenseState getLicenseState() { return XPackPlugin.getSharedLicenseState(); } - - @Override - public List getRestHandlers(final Settings settings, final RestController restController, - final ClusterSettings clusterSettings, final IndexScopedSettings indexScopedSettings, final SettingsFilter settingsFilter, - final IndexNameExpressionResolver indexNameExpressionResolver, final Supplier nodesInCluster) { - - if (!enabled) { - return emptyList(); - } - - return Arrays.asList( - new RestPutDataFrameTransformAction(restController), - new RestStartDataFrameTransformAction(restController), - new RestStopDataFrameTransformAction(restController), - 
new RestDeleteDataFrameTransformAction(restController), - new RestGetDataFrameTransformsAction(restController), - new RestGetDataFrameTransformsStatsAction(restController), - new RestPreviewDataFrameTransformAction(restController), - new RestUpdateDataFrameTransformAction(restController) - ); - } - - @Override - public List> getActions() { - if (!enabled) { - return emptyList(); - } - - return Arrays.asList( - new ActionHandler<>(PutDataFrameTransformAction.INSTANCE, TransportPutDataFrameTransformAction.class), - new ActionHandler<>(StartDataFrameTransformAction.INSTANCE, TransportStartDataFrameTransformAction.class), - new ActionHandler<>(StartDataFrameTransformTaskAction.INSTANCE, TransportStartDataFrameTransformTaskAction.class), - new ActionHandler<>(StopDataFrameTransformAction.INSTANCE, TransportStopDataFrameTransformAction.class), - new ActionHandler<>(DeleteDataFrameTransformAction.INSTANCE, TransportDeleteDataFrameTransformAction.class), - new ActionHandler<>(GetDataFrameTransformsAction.INSTANCE, TransportGetDataFrameTransformsAction.class), - new ActionHandler<>(GetDataFrameTransformsStatsAction.INSTANCE, TransportGetDataFrameTransformsStatsAction.class), - new ActionHandler<>(PreviewDataFrameTransformAction.INSTANCE, TransportPreviewDataFrameTransformAction.class), - new ActionHandler<>(UpdateDataFrameTransformAction.INSTANCE, TransportUpdateDataFrameTransformAction.class) - ); - } - - @Override - public List> getExecutorBuilders(Settings settings) { - if (false == enabled || transportClientMode) { - return emptyList(); - } - - FixedExecutorBuilder indexing = new FixedExecutorBuilder(settings, TASK_THREAD_POOL_NAME, 4, 4, - "data_frame.task_thread_pool"); - - return Collections.singletonList(indexing); - } - - @Override - public Collection createComponents(Client client, ClusterService clusterService, ThreadPool threadPool, - ResourceWatcherService resourceWatcherService, ScriptService scriptService, NamedXContentRegistry xContentRegistry, - Environment environment, NodeEnvironment nodeEnvironment, NamedWriteableRegistry namedWriteableRegistry) { - if (enabled == false || transportClientMode) { - return emptyList(); - } - dataFrameAuditor.set(new DataFrameAuditor(client, clusterService.getNodeName())); - dataFrameTransformsConfigManager.set(new DataFrameTransformsConfigManager(client, xContentRegistry)); - dataFrameTransformsCheckpointService.set(new DataFrameTransformsCheckpointService(client, - dataFrameTransformsConfigManager.get(), - dataFrameAuditor.get())); - - return Arrays.asList(dataFrameTransformsConfigManager.get(), dataFrameAuditor.get(), dataFrameTransformsCheckpointService.get()); - } - - @Override - public UnaryOperator> getIndexTemplateMetaDataUpgrader() { - return templates -> { - try { - templates.put(DataFrameInternalIndex.LATEST_INDEX_VERSIONED_NAME, DataFrameInternalIndex.getIndexTemplateMetaData()); - } catch (IOException e) { - logger.error("Error creating data frame index template", e); - } - try { - templates.put(DataFrameInternalIndex.AUDIT_INDEX, DataFrameInternalIndex.getAuditIndexTemplateMetaData()); - } catch (IOException e) { - logger.warn("Error creating data frame audit index", e); - } - return templates; - }; - } - - @Override - public List> getPersistentTasksExecutor(ClusterService clusterService, ThreadPool threadPool, - Client client, SettingsModule settingsModule) { - if (enabled == false || transportClientMode) { - return emptyList(); - } - - schedulerEngine.set(new SchedulerEngine(settings, Clock.systemUTC())); - - // the transforms 
config manager should have been created - assert dataFrameTransformsConfigManager.get() != null; - // the auditor should have been created - assert dataFrameAuditor.get() != null; - assert dataFrameTransformsCheckpointService.get() != null; - - return Collections.singletonList( - new DataFrameTransformPersistentTasksExecutor(client, - dataFrameTransformsConfigManager.get(), - dataFrameTransformsCheckpointService.get(), - schedulerEngine.get(), - dataFrameAuditor.get(), - threadPool, - clusterService, - settingsModule.getSettings())); - } - - @Override - public List> getSettings() { - return Collections.singletonList(DataFrameTransformTask.NUM_FAILURE_RETRIES_SETTING); - } - - @Override - public void close() { - if (schedulerEngine.get() != null) { - schedulerEngine.get().stop(); - } - } - - @Override - public List getNamedXContent() { - return new DataFrameNamedXContentProvider().getNamedXContentParsers(); - } -} diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStartDataFrameTransformTaskAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStartDataFrameTransformTaskAction.java deleted file mode 100644 index 17df98e0c2b..00000000000 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStartDataFrameTransformTaskAction.java +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -package org.elasticsearch.xpack.dataframe.action; - -import org.elasticsearch.ResourceNotFoundException; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.FailedNodeException; -import org.elasticsearch.action.TaskOperationFailure; -import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.tasks.TransportTasksAction; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.license.LicenseUtils; -import org.elasticsearch.license.XPackLicenseState; -import org.elasticsearch.tasks.Task; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportService; -import org.elasticsearch.xpack.core.XPackField; -import org.elasticsearch.xpack.core.dataframe.action.StartDataFrameTransformTaskAction; -import org.elasticsearch.xpack.dataframe.transforms.DataFrameTransformTask; - -import java.util.List; - -/** - * Internal only transport class to change an allocated persistent task's state to started - */ -public class TransportStartDataFrameTransformTaskAction extends - TransportTasksAction { - - private final XPackLicenseState licenseState; - - @Inject - public TransportStartDataFrameTransformTaskAction(TransportService transportService, ActionFilters actionFilters, - ClusterService clusterService, XPackLicenseState licenseState) { - super(StartDataFrameTransformTaskAction.NAME, clusterService, transportService, actionFilters, - StartDataFrameTransformTaskAction.Request::new, StartDataFrameTransformTaskAction.Response::new, - StartDataFrameTransformTaskAction.Response::new, ThreadPool.Names.SAME); - this.licenseState = licenseState; - } - - @Override - protected void doExecute(Task task, StartDataFrameTransformTaskAction.Request request, - ActionListener listener) { - - if 
(!licenseState.isDataFrameAllowed()) { - listener.onFailure(LicenseUtils.newComplianceException(XPackField.DATA_FRAME)); - return; - } - - super.doExecute(task, request, listener); - } - - @Override - protected void taskOperation(StartDataFrameTransformTaskAction.Request request, DataFrameTransformTask transformTask, - ActionListener listener) { - if (transformTask.getTransformId().equals(request.getId())) { - transformTask.start(null, request.isForce(), listener); - } else { - listener.onFailure(new RuntimeException("ID of data frame transform task [" + transformTask.getTransformId() - + "] does not match request's ID [" + request.getId() + "]")); - } - } - - @Override - protected StartDataFrameTransformTaskAction.Response newResponse(StartDataFrameTransformTaskAction.Request request, - List tasks, - List taskOperationFailures, - List failedNodeExceptions) { - - if (taskOperationFailures.isEmpty() == false) { - throw org.elasticsearch.ExceptionsHelper.convertToElastic(taskOperationFailures.get(0).getCause()); - } else if (failedNodeExceptions.isEmpty() == false) { - throw org.elasticsearch.ExceptionsHelper.convertToElastic(failedNodeExceptions.get(0)); - } - - // Either the transform doesn't exist (the user didn't create it yet) or was deleted - // after the StartAPI executed. - // In either case, let the user know - if (tasks.size() == 0) { - throw new ResourceNotFoundException("Task for data frame transform [" + request.getId() + "] not found"); - } - - assert tasks.size() == 1; - - boolean allStarted = tasks.stream().allMatch(StartDataFrameTransformTaskAction.Response::isStarted); - return new StartDataFrameTransformTaskAction.Response(allStarted); - } -} diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/checkpoint/DataFrameTransformsCheckpointService.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/checkpoint/DataFrameTransformsCheckpointService.java deleted file mode 100644 index 85ebcde85c0..00000000000 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/checkpoint/DataFrameTransformsCheckpointService.java +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -package org.elasticsearch.xpack.dataframe.checkpoint; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.client.Client; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerPosition; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpointingInfo; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformProgress; -import org.elasticsearch.xpack.core.dataframe.transforms.TimeSyncConfig; -import org.elasticsearch.xpack.dataframe.notifications.DataFrameAuditor; -import org.elasticsearch.xpack.dataframe.persistence.DataFrameTransformsConfigManager; - -/** - * DataFrameTransform Checkpoint Service - * - * Allows checkpointing a source of a data frame transform which includes all relevant checkpoints of the source. - * - * This will be used to checkpoint a transform, detect changes, run the transform in continuous mode. 
- * - */ -public class DataFrameTransformsCheckpointService { - - private static final Logger logger = LogManager.getLogger(DataFrameTransformsCheckpointService.class); - - private final Client client; - private final DataFrameTransformsConfigManager dataFrameTransformsConfigManager; - private final DataFrameAuditor dataFrameAuditor; - - public DataFrameTransformsCheckpointService(final Client client, - final DataFrameTransformsConfigManager dataFrameTransformsConfigManager, DataFrameAuditor dataFrameAuditor) { - this.client = client; - this.dataFrameTransformsConfigManager = dataFrameTransformsConfigManager; - this.dataFrameAuditor = dataFrameAuditor; - } - - public CheckpointProvider getCheckpointProvider(final DataFrameTransformConfig transformConfig) { - if (transformConfig.getSyncConfig() instanceof TimeSyncConfig) { - return new TimeBasedCheckpointProvider(client, dataFrameTransformsConfigManager, dataFrameAuditor, transformConfig); - } - - return new DefaultCheckpointProvider(client, dataFrameTransformsConfigManager, dataFrameAuditor, transformConfig); - } - - /** - * Get checkpointing stats for a stopped data frame - * - * @param transformId The data frame task - * @param lastCheckpointNumber the last checkpoint - * @param nextCheckpointPosition position for the next checkpoint - * @param nextCheckpointProgress progress for the next checkpoint - * @param listener listener to retrieve the result - */ - public void getCheckpointingInfo(final String transformId, - final long lastCheckpointNumber, - final DataFrameIndexerPosition nextCheckpointPosition, - final DataFrameTransformProgress nextCheckpointProgress, - final ActionListener listener) { - - // we need to retrieve the config first before we can defer the rest to the corresponding provider - dataFrameTransformsConfigManager.getTransformConfiguration(transformId, ActionListener.wrap( - transformConfig -> { - getCheckpointProvider(transformConfig).getCheckpointingInfo(lastCheckpointNumber, - nextCheckpointPosition, nextCheckpointProgress, listener); - }, - transformError -> { - logger.warn("Failed to retrieve configuration for data frame [" + transformId + "]", transformError); - listener.onFailure(new CheckpointException("Failed to retrieve configuration", transformError)); - }) - ); - } - -} diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/notifications/DataFrameAuditor.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/notifications/DataFrameAuditor.java deleted file mode 100644 index 9d3e7f51c2e..00000000000 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/notifications/DataFrameAuditor.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ -package org.elasticsearch.xpack.dataframe.notifications; - -import org.elasticsearch.client.Client; -import org.elasticsearch.xpack.core.common.notifications.AbstractAuditor; -import org.elasticsearch.xpack.core.dataframe.notifications.DataFrameAuditMessage; -import org.elasticsearch.xpack.dataframe.persistence.DataFrameInternalIndex; - -import static org.elasticsearch.xpack.core.ClientHelper.DATA_FRAME_ORIGIN; - -/** - * DataFrameAuditor class that abstracts away generic templating for easier injection - */ -public class DataFrameAuditor extends AbstractAuditor { - - public DataFrameAuditor(Client client, String nodeName) { - super(client, nodeName, DataFrameInternalIndex.AUDIT_INDEX, DATA_FRAME_ORIGIN, DataFrameAuditMessage::new); - } -} diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestDeleteDataFrameTransformAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestDeleteDataFrameTransformAction.java deleted file mode 100644 index 4d891427e4d..00000000000 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestDeleteDataFrameTransformAction.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.dataframe.rest.action; - - -import org.elasticsearch.client.node.NodeClient; -import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.RestController; -import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.action.RestToXContentListener; -import org.elasticsearch.xpack.core.dataframe.DataFrameField; -import org.elasticsearch.xpack.core.dataframe.action.DeleteDataFrameTransformAction; - -public class RestDeleteDataFrameTransformAction extends BaseRestHandler { - - public RestDeleteDataFrameTransformAction(RestController controller) { - controller.registerHandler(RestRequest.Method.DELETE, DataFrameField.REST_BASE_PATH_TRANSFORMS_BY_ID, this); - } - - @Override - protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { - if (restRequest.hasContent()) { - throw new IllegalArgumentException("delete data frame transforms requests can not have a request body"); - } - - String id = restRequest.param(DataFrameField.ID.getPreferredName()); - boolean force = restRequest.paramAsBoolean(DataFrameField.FORCE.getPreferredName(), false); - DeleteDataFrameTransformAction.Request request = new DeleteDataFrameTransformAction.Request(id, force); - - return channel -> client.execute(DeleteDataFrameTransformAction.INSTANCE, request, - new RestToXContentListener<>(channel)); - } - - @Override - public String getName() { - return "data_frame_delete_transform_action"; - } -} diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestPutDataFrameTransformAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestPutDataFrameTransformAction.java deleted file mode 100644 index 5bd2c9fe479..00000000000 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestPutDataFrameTransformAction.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -package org.elasticsearch.xpack.dataframe.rest.action; - -import org.elasticsearch.client.node.NodeClient; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.RestController; -import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.action.RestToXContentListener; -import org.elasticsearch.xpack.core.dataframe.DataFrameField; -import org.elasticsearch.xpack.core.dataframe.action.PutDataFrameTransformAction; - -import java.io.IOException; - -public class RestPutDataFrameTransformAction extends BaseRestHandler { - - public RestPutDataFrameTransformAction(RestController controller) { - controller.registerHandler(RestRequest.Method.PUT, DataFrameField.REST_BASE_PATH_TRANSFORMS_BY_ID, this); - } - - @Override - public String getName() { - return "data_frame_put_transform_action"; - } - - @Override - protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { - String id = restRequest.param(DataFrameField.ID.getPreferredName()); - XContentParser parser = restRequest.contentParser(); - - boolean deferValidation = restRequest.paramAsBoolean(DataFrameField.DEFER_VALIDATION.getPreferredName(), false); - PutDataFrameTransformAction.Request request = PutDataFrameTransformAction.Request.fromXContent(parser, id, deferValidation); - - return channel -> client.execute(PutDataFrameTransformAction.INSTANCE, request, new RestToXContentListener<>(channel)); - } -} diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestStopDataFrameTransformAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestStopDataFrameTransformAction.java deleted file mode 100644 index 112fe708d59..00000000000 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestStopDataFrameTransformAction.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ -package org.elasticsearch.xpack.dataframe.rest.action; - -import org.elasticsearch.client.node.NodeClient; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.RestController; -import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.action.RestToXContentListener; -import org.elasticsearch.xpack.core.dataframe.DataFrameField; -import org.elasticsearch.xpack.core.dataframe.action.StopDataFrameTransformAction; - -public class RestStopDataFrameTransformAction extends BaseRestHandler { - - public RestStopDataFrameTransformAction(RestController controller) { - controller.registerHandler(RestRequest.Method.POST, DataFrameField.REST_BASE_PATH_TRANSFORMS_BY_ID + "_stop", this); - } - - @Override - protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { - String id = restRequest.param(DataFrameField.ID.getPreferredName()); - TimeValue timeout = restRequest.paramAsTime(DataFrameField.TIMEOUT.getPreferredName(), - StopDataFrameTransformAction.DEFAULT_TIMEOUT); - boolean waitForCompletion = restRequest.paramAsBoolean(DataFrameField.WAIT_FOR_COMPLETION.getPreferredName(), false); - boolean force = restRequest.paramAsBoolean(DataFrameField.FORCE.getPreferredName(), false); - boolean allowNoMatch = restRequest.paramAsBoolean(DataFrameField.ALLOW_NO_MATCH.getPreferredName(), false); - - - StopDataFrameTransformAction.Request request = new StopDataFrameTransformAction.Request(id, - waitForCompletion, - force, - timeout, - allowNoMatch); - - return channel -> client.execute(StopDataFrameTransformAction.INSTANCE, request, - new RestToXContentListener<>(channel)); - } - - @Override - public String getName() { - return "data_frame_stop_transform_action"; - } -} diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestUpdateDataFrameTransformAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestUpdateDataFrameTransformAction.java deleted file mode 100644 index 5449f3e8d9e..00000000000 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestUpdateDataFrameTransformAction.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -package org.elasticsearch.xpack.dataframe.rest.action; - -import org.elasticsearch.client.node.NodeClient; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.RestController; -import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.action.RestToXContentListener; -import org.elasticsearch.xpack.core.dataframe.DataFrameField; -import org.elasticsearch.xpack.core.dataframe.action.UpdateDataFrameTransformAction; - -import java.io.IOException; - -public class RestUpdateDataFrameTransformAction extends BaseRestHandler { - - public RestUpdateDataFrameTransformAction(RestController controller) { - controller.registerHandler(RestRequest.Method.POST, DataFrameField.REST_BASE_PATH_TRANSFORMS_BY_ID + "_update", this); - } - - @Override - public String getName() { - return "data_frame_update_transform_action"; - } - - @Override - protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { - String id = restRequest.param(DataFrameField.ID.getPreferredName()); - boolean deferValidation = restRequest.paramAsBoolean(DataFrameField.DEFER_VALIDATION.getPreferredName(), false); - XContentParser parser = restRequest.contentParser(); - UpdateDataFrameTransformAction.Request request = UpdateDataFrameTransformAction.Request.fromXContent(parser, id, deferValidation); - - return channel -> client.execute(UpdateDataFrameTransformAction.INSTANCE, request, new RestToXContentListener<>(channel)); - } -} diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/ClientDataFrameIndexerBuilder.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/ClientDataFrameIndexerBuilder.java deleted file mode 100644 index 672cdcc25ef..00000000000 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/ClientDataFrameIndexerBuilder.java +++ /dev/null @@ -1,123 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -package org.elasticsearch.xpack.dataframe.transforms; - -import org.elasticsearch.client.Client; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerPosition; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerTransformStats; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpoint; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformProgress; -import org.elasticsearch.xpack.core.indexing.IndexerState; -import org.elasticsearch.xpack.dataframe.checkpoint.CheckpointProvider; -import org.elasticsearch.xpack.dataframe.checkpoint.DataFrameTransformsCheckpointService; -import org.elasticsearch.xpack.dataframe.notifications.DataFrameAuditor; -import org.elasticsearch.xpack.dataframe.persistence.DataFrameTransformsConfigManager; - -import java.util.Map; -import java.util.concurrent.atomic.AtomicReference; - -class ClientDataFrameIndexerBuilder { - private Client client; - private DataFrameTransformsConfigManager transformsConfigManager; - private DataFrameTransformsCheckpointService transformsCheckpointService; - private DataFrameAuditor auditor; - private Map fieldMappings; - private DataFrameTransformConfig transformConfig; - private DataFrameIndexerTransformStats initialStats; - private IndexerState indexerState = IndexerState.STOPPED; - private DataFrameIndexerPosition initialPosition; - private DataFrameTransformProgress progress; - private DataFrameTransformCheckpoint lastCheckpoint; - private DataFrameTransformCheckpoint nextCheckpoint; - - ClientDataFrameIndexerBuilder() { - this.initialStats = new DataFrameIndexerTransformStats(); - } - - ClientDataFrameIndexer build(DataFrameTransformTask parentTask) { - CheckpointProvider checkpointProvider = transformsCheckpointService.getCheckpointProvider(transformConfig); - - return new ClientDataFrameIndexer(this.transformsConfigManager, - checkpointProvider, - new AtomicReference<>(this.indexerState), - this.initialPosition, - this.client, - this.auditor, - this.initialStats, - this.transformConfig, - this.fieldMappings, - this.progress, - this.lastCheckpoint, - this.nextCheckpoint, - parentTask); - } - - ClientDataFrameIndexerBuilder setClient(Client client) { - this.client = client; - return this; - } - - ClientDataFrameIndexerBuilder setTransformsConfigManager(DataFrameTransformsConfigManager transformsConfigManager) { - this.transformsConfigManager = transformsConfigManager; - return this; - } - - ClientDataFrameIndexerBuilder setTransformsCheckpointService(DataFrameTransformsCheckpointService transformsCheckpointService) { - this.transformsCheckpointService = transformsCheckpointService; - return this; - } - - ClientDataFrameIndexerBuilder setAuditor(DataFrameAuditor auditor) { - this.auditor = auditor; - return this; - } - - ClientDataFrameIndexerBuilder setFieldMappings(Map fieldMappings) { - this.fieldMappings = fieldMappings; - return this; - } - - ClientDataFrameIndexerBuilder setTransformConfig(DataFrameTransformConfig transformConfig) { - this.transformConfig = transformConfig; - return this; - } - - DataFrameTransformConfig getTransformConfig() { - return this.transformConfig; - } - - ClientDataFrameIndexerBuilder setInitialStats(DataFrameIndexerTransformStats initialStats) { - this.initialStats = initialStats; - return this; - } - - ClientDataFrameIndexerBuilder setIndexerState(IndexerState indexerState) { - this.indexerState = indexerState; - 
return this; - } - - ClientDataFrameIndexerBuilder setInitialPosition(DataFrameIndexerPosition initialPosition) { - this.initialPosition = initialPosition; - return this; - } - - ClientDataFrameIndexerBuilder setProgress(DataFrameTransformProgress progress) { - this.progress = progress; - return this; - } - - ClientDataFrameIndexerBuilder setLastCheckpoint(DataFrameTransformCheckpoint lastCheckpoint) { - this.lastCheckpoint = lastCheckpoint; - return this; - } - - ClientDataFrameIndexerBuilder setNextCheckpoint(DataFrameTransformCheckpoint nextCheckpoint) { - this.nextCheckpoint = nextCheckpoint; - return this; - } -} \ No newline at end of file diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameTransformsConfigManagerTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameTransformsConfigManagerTests.java deleted file mode 100644 index 56c8d9ce07f..00000000000 --- a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameTransformsConfigManagerTests.java +++ /dev/null @@ -1,390 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -package org.elasticsearch.xpack.dataframe.persistence; - -import org.elasticsearch.ResourceAlreadyExistsException; -import org.elasticsearch.ResourceNotFoundException; -import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; -import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; -import org.elasticsearch.action.get.GetRequest; -import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.support.WriteRequest; -import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.xpack.core.action.util.PageParams; -import org.elasticsearch.xpack.core.dataframe.DataFrameMessages; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpoint; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpointTests; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfigTests; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformStoredDoc; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformStoredDocTests; -import org.elasticsearch.xpack.dataframe.DataFrameSingleNodeTestCase; -import org.junit.Before; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.Comparator; -import java.util.List; -import java.util.stream.Collectors; - -import static org.elasticsearch.xpack.dataframe.persistence.DataFrameInternalIndex.mappings; -import static org.elasticsearch.xpack.dataframe.persistence.DataFrameTransformsConfigManager.TO_XCONTENT_PARAMS; -import static org.hamcrest.CoreMatchers.is; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.instanceOf; - -public class DataFrameTransformsConfigManagerTests extends DataFrameSingleNodeTestCase { - - private 
DataFrameTransformsConfigManager transformsConfigManager; - - @Before - public void createComponents() { - transformsConfigManager = new DataFrameTransformsConfigManager(client(), xContentRegistry()); - } - - public void testGetMissingTransform() throws InterruptedException { - // the index does not exist yet - assertAsync(listener -> transformsConfigManager.getTransformConfiguration("not_there", listener), (DataFrameTransformConfig) null, - null, e -> { - assertEquals(ResourceNotFoundException.class, e.getClass()); - assertEquals(DataFrameMessages.getMessage(DataFrameMessages.REST_DATA_FRAME_UNKNOWN_TRANSFORM, "not_there"), - e.getMessage()); - }); - - // create one transform and test with an existing index - assertAsync( - listener -> transformsConfigManager - .putTransformConfiguration(DataFrameTransformConfigTests.randomDataFrameTransformConfig(), listener), - true, null, null); - - // same test, but different code path - assertAsync(listener -> transformsConfigManager.getTransformConfiguration("not_there", listener), (DataFrameTransformConfig) null, - null, e -> { - assertEquals(ResourceNotFoundException.class, e.getClass()); - assertEquals(DataFrameMessages.getMessage(DataFrameMessages.REST_DATA_FRAME_UNKNOWN_TRANSFORM, "not_there"), - e.getMessage()); - }); - } - - public void testDeleteMissingTransform() throws InterruptedException { - // the index does not exist yet - assertAsync(listener -> transformsConfigManager.deleteTransform("not_there", listener), (Boolean) null, null, e -> { - assertEquals(ResourceNotFoundException.class, e.getClass()); - assertEquals(DataFrameMessages.getMessage(DataFrameMessages.REST_DATA_FRAME_UNKNOWN_TRANSFORM, "not_there"), e.getMessage()); - }); - - // create one transform and test with an existing index - assertAsync( - listener -> transformsConfigManager - .putTransformConfiguration(DataFrameTransformConfigTests.randomDataFrameTransformConfig(), listener), - true, null, null); - - // same test, but different code path - assertAsync(listener -> transformsConfigManager.deleteTransform("not_there", listener), (Boolean) null, null, e -> { - assertEquals(ResourceNotFoundException.class, e.getClass()); - assertEquals(DataFrameMessages.getMessage(DataFrameMessages.REST_DATA_FRAME_UNKNOWN_TRANSFORM, "not_there"), e.getMessage()); - }); - } - - public void testCreateReadDeleteTransform() throws InterruptedException { - DataFrameTransformConfig transformConfig = DataFrameTransformConfigTests.randomDataFrameTransformConfig(); - - // create transform - assertAsync(listener -> transformsConfigManager.putTransformConfiguration(transformConfig, listener), true, null, null); - - // read transform - assertAsync(listener -> transformsConfigManager.getTransformConfiguration(transformConfig.getId(), listener), transformConfig, null, - null); - - // try to create again - assertAsync(listener -> transformsConfigManager.putTransformConfiguration(transformConfig, listener), (Boolean) null, null, e -> { - assertEquals(ResourceAlreadyExistsException.class, e.getClass()); - assertEquals(DataFrameMessages.getMessage(DataFrameMessages.REST_PUT_DATA_FRAME_TRANSFORM_EXISTS, transformConfig.getId()), - e.getMessage()); - }); - - // delete transform - assertAsync(listener -> transformsConfigManager.deleteTransform(transformConfig.getId(), listener), true, null, null); - - // delete again - assertAsync(listener -> transformsConfigManager.deleteTransform(transformConfig.getId(), listener), (Boolean) null, null, e -> { - assertEquals(ResourceNotFoundException.class, e.getClass()); - 
assertEquals(DataFrameMessages.getMessage(DataFrameMessages.REST_DATA_FRAME_UNKNOWN_TRANSFORM, transformConfig.getId()), - e.getMessage()); - }); - - // try to get deleted transform - assertAsync(listener -> transformsConfigManager.getTransformConfiguration(transformConfig.getId(), listener), - (DataFrameTransformConfig) null, null, e -> { - assertEquals(ResourceNotFoundException.class, e.getClass()); - assertEquals(DataFrameMessages.getMessage(DataFrameMessages.REST_DATA_FRAME_UNKNOWN_TRANSFORM, transformConfig.getId()), - e.getMessage()); - }); - } - - public void testCreateReadDeleteCheckPoint() throws InterruptedException { - DataFrameTransformCheckpoint checkpoint = DataFrameTransformCheckpointTests.randomDataFrameTransformCheckpoints(); - - // create - assertAsync(listener -> transformsConfigManager.putTransformCheckpoint(checkpoint, listener), true, null, null); - - // read - assertAsync(listener -> transformsConfigManager.getTransformCheckpoint(checkpoint.getTransformId(), checkpoint.getCheckpoint(), - listener), checkpoint, null, null); - - // delete - assertAsync(listener -> transformsConfigManager.deleteTransform(checkpoint.getTransformId(), listener), true, null, null); - - // delete again - assertAsync(listener -> transformsConfigManager.deleteTransform(checkpoint.getTransformId(), listener), (Boolean) null, null, e -> { - assertEquals(ResourceNotFoundException.class, e.getClass()); - assertEquals(DataFrameMessages.getMessage(DataFrameMessages.REST_DATA_FRAME_UNKNOWN_TRANSFORM, checkpoint.getTransformId()), - e.getMessage()); - }); - - // getting a non-existing checkpoint returns null - assertAsync(listener -> transformsConfigManager.getTransformCheckpoint(checkpoint.getTransformId(), checkpoint.getCheckpoint(), - listener), DataFrameTransformCheckpoint.EMPTY, null, null); - } - - public void testExpandIds() throws Exception { - DataFrameTransformConfig transformConfig1 = DataFrameTransformConfigTests.randomDataFrameTransformConfig("transform1_expand"); - DataFrameTransformConfig transformConfig2 = DataFrameTransformConfigTests.randomDataFrameTransformConfig("transform2_expand"); - DataFrameTransformConfig transformConfig3 = DataFrameTransformConfigTests.randomDataFrameTransformConfig("transform3_expand"); - - // create transform - assertAsync(listener -> transformsConfigManager.putTransformConfiguration(transformConfig1, listener), true, null, null); - assertAsync(listener -> transformsConfigManager.putTransformConfiguration(transformConfig2, listener), true, null, null); - assertAsync(listener -> transformsConfigManager.putTransformConfiguration(transformConfig3, listener), true, null, null); - - - // expand 1 id - assertAsync(listener -> - transformsConfigManager.expandTransformIds(transformConfig1.getId(), - PageParams.defaultParams(), - true, - listener), - new Tuple<>(1L, Collections.singletonList("transform1_expand")), - null, - null); - - // expand 2 ids explicitly - assertAsync(listener -> - transformsConfigManager.expandTransformIds("transform1_expand,transform2_expand", - PageParams.defaultParams(), - true, - listener), - new Tuple<>(2L, Arrays.asList("transform1_expand", "transform2_expand")), - null, - null); - - // expand 3 ids wildcard and explicit - assertAsync(listener -> - transformsConfigManager.expandTransformIds("transform1*,transform2_expand,transform3_expand", - PageParams.defaultParams(), - true, - listener), - new Tuple<>(3L, Arrays.asList("transform1_expand", "transform2_expand", "transform3_expand")), - null, - null); - - // expand 3 ids _all - 
assertAsync(listener -> - transformsConfigManager.expandTransformIds("_all", - PageParams.defaultParams(), - true, - listener), - new Tuple<>(3L, Arrays.asList("transform1_expand", "transform2_expand", "transform3_expand")), - null, - null); - - // expand 1 id _all with pagination - assertAsync(listener -> - transformsConfigManager.expandTransformIds("_all", - new PageParams(0, 1), - true, - listener), - new Tuple<>(3L, Collections.singletonList("transform1_expand")), - null, - null); - - // expand 2 later ids _all with pagination - assertAsync(listener -> - transformsConfigManager.expandTransformIds("_all", - new PageParams(1, 2), - true, - listener), - new Tuple<>(3L, Arrays.asList("transform2_expand", "transform3_expand")), - null, - null); - - // expand 1 id explicitly that does not exist - assertAsync(listener -> - transformsConfigManager.expandTransformIds("unknown,unknown2", - new PageParams(1, 2), - true, - listener), - (Tuple>)null, - null, - e -> { - assertThat(e, instanceOf(ResourceNotFoundException.class)); - assertThat(e.getMessage(), - equalTo(DataFrameMessages.getMessage(DataFrameMessages.REST_DATA_FRAME_UNKNOWN_TRANSFORM, "unknown,unknown2"))); - }); - - // expand 1 id implicitly that does not exist - assertAsync(listener -> - transformsConfigManager.expandTransformIds("unknown*", - new PageParams(1, 2), - false, - listener), - (Tuple>)null, - null, - e -> { - assertThat(e, instanceOf(ResourceNotFoundException.class)); - assertThat(e.getMessage(), - equalTo(DataFrameMessages.getMessage(DataFrameMessages.REST_DATA_FRAME_UNKNOWN_TRANSFORM, "unknown*"))); - }); - - } - - public void testStoredDoc() throws InterruptedException { - String transformId = "transform_test_stored_doc_create_read_update"; - - DataFrameTransformStoredDoc storedDocs = DataFrameTransformStoredDocTests.randomDataFrameTransformStoredDoc(transformId); - SeqNoPrimaryTermAndIndex firstIndex = new SeqNoPrimaryTermAndIndex(0, 1, DataFrameInternalIndex.LATEST_INDEX_NAME); - - assertAsync(listener -> transformsConfigManager.putOrUpdateTransformStoredDoc(storedDocs, null, listener), - firstIndex, - null, - null); - assertAsync(listener -> transformsConfigManager.getTransformStoredDoc(transformId, listener), - Tuple.tuple(storedDocs, firstIndex), - null, - null); - - SeqNoPrimaryTermAndIndex secondIndex = new SeqNoPrimaryTermAndIndex(1, 1, DataFrameInternalIndex.LATEST_INDEX_NAME); - DataFrameTransformStoredDoc updated = DataFrameTransformStoredDocTests.randomDataFrameTransformStoredDoc(transformId); - assertAsync(listener -> transformsConfigManager.putOrUpdateTransformStoredDoc(updated, firstIndex, listener), - secondIndex, - null, - null); - assertAsync(listener -> transformsConfigManager.getTransformStoredDoc(transformId, listener), - Tuple.tuple(updated, secondIndex), - null, - null); - - assertAsync(listener -> transformsConfigManager.putOrUpdateTransformStoredDoc(updated, firstIndex, listener), - (SeqNoPrimaryTermAndIndex)null, - r -> fail("did not fail with version conflict."), - e -> assertThat( - e.getMessage(), - equalTo("Failed to persist data frame statistics for transform [transform_test_stored_doc_create_read_update]")) - ); - } - - public void testGetStoredDocMultiple() throws InterruptedException { - int numStats = randomIntBetween(10, 15); - List expectedDocs = new ArrayList<>(); - for (int i=0; i transformsConfigManager.putOrUpdateTransformStoredDoc(stat, null, listener), - initialSeqNo, - null, - null); - } - - // remove one of the put docs so we don't retrieve all - if (expectedDocs.size() > 1) 
{ - expectedDocs.remove(expectedDocs.size() - 1); - } - List ids = expectedDocs.stream().map(DataFrameTransformStoredDoc::getId).collect(Collectors.toList()); - - // returned docs will be ordered by id - expectedDocs.sort(Comparator.comparing(DataFrameTransformStoredDoc::getId)); - assertAsync(listener -> transformsConfigManager.getTransformStoredDoc(ids, listener), expectedDocs, null, null); - } - - public void testDeleteOldTransformConfigurations() throws Exception { - String oldIndex = DataFrameInternalIndex.INDEX_PATTERN + "1"; - String transformId = "transform_test_delete_old_configurations"; - String docId = DataFrameTransformConfig.documentId(transformId); - DataFrameTransformConfig transformConfig = DataFrameTransformConfigTests - .randomDataFrameTransformConfig("transform_test_delete_old_configurations"); - client().admin().indices().create(new CreateIndexRequest(oldIndex) - .mapping(MapperService.SINGLE_MAPPING_NAME, mappings())).actionGet(); - - try(XContentBuilder builder = XContentFactory.jsonBuilder()) { - XContentBuilder source = transformConfig.toXContent(builder, new ToXContent.MapParams(TO_XCONTENT_PARAMS)); - IndexRequest request = new IndexRequest(oldIndex) - .source(source) - .id(docId) - .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); - client().index(request).actionGet(); - } - - assertAsync(listener -> transformsConfigManager.putTransformConfiguration(transformConfig, listener), true, null, null); - - assertThat(client().get(new GetRequest(oldIndex).id(docId)).actionGet().isExists(), is(true)); - assertThat(client().get(new GetRequest(DataFrameInternalIndex.LATEST_INDEX_NAME).id(docId)).actionGet().isExists(), is(true)); - - assertAsync(listener -> transformsConfigManager.deleteOldTransformConfigurations(transformId, listener), true, null, null); - - client().admin().indices().refresh(new RefreshRequest(DataFrameInternalIndex.INDEX_NAME_PATTERN)).actionGet(); - assertThat(client().get(new GetRequest(oldIndex).id(docId)).actionGet().isExists(), is(false)); - assertThat(client().get(new GetRequest(DataFrameInternalIndex.LATEST_INDEX_NAME).id(docId)).actionGet().isExists(), is(true)); - } - - public void testDeleteOldTransformStoredDocuments() throws Exception { - String oldIndex = DataFrameInternalIndex.INDEX_PATTERN + "1"; - String transformId = "transform_test_delete_old_stored_documents"; - String docId = DataFrameTransformStoredDoc.documentId(transformId); - DataFrameTransformStoredDoc dataFrameTransformStoredDoc = DataFrameTransformStoredDocTests - .randomDataFrameTransformStoredDoc(transformId); - client().admin().indices().create(new CreateIndexRequest(oldIndex) - .mapping(MapperService.SINGLE_MAPPING_NAME, mappings())).actionGet(); - - try(XContentBuilder builder = XContentFactory.jsonBuilder()) { - XContentBuilder source = dataFrameTransformStoredDoc.toXContent(builder, new ToXContent.MapParams(TO_XCONTENT_PARAMS)); - IndexRequest request = new IndexRequest(oldIndex) - .source(source) - .id(docId); - client().index(request).actionGet(); - } - - // Put when referencing the old index should create the doc in the new index, even if we have seqNo|primaryTerm info - assertAsync(listener -> transformsConfigManager.putOrUpdateTransformStoredDoc(dataFrameTransformStoredDoc, - new SeqNoPrimaryTermAndIndex(3, 1, oldIndex), - listener), - new SeqNoPrimaryTermAndIndex(0, 1, DataFrameInternalIndex.LATEST_INDEX_NAME), - null, - null); - - client().admin().indices().refresh(new RefreshRequest(DataFrameInternalIndex.INDEX_NAME_PATTERN)).actionGet(); - - 
assertThat(client().get(new GetRequest(oldIndex).id(docId)).actionGet().isExists(), is(true)); - assertThat(client().get(new GetRequest(DataFrameInternalIndex.LATEST_INDEX_NAME).id(docId)).actionGet().isExists(), is(true)); - - assertAsync(listener -> transformsConfigManager.deleteOldTransformStoredDocuments(transformId, listener), - true, - null, - null); - - client().admin().indices().refresh(new RefreshRequest(DataFrameInternalIndex.INDEX_NAME_PATTERN)).actionGet(); - assertThat(client().get(new GetRequest(oldIndex).id(docId)).actionGet().isExists(), is(false)); - assertThat(client().get(new GetRequest(DataFrameInternalIndex.LATEST_INDEX_NAME).id(docId)).actionGet().isExists(), is(true)); - } -} diff --git a/x-pack/plugin/ilm/build.gradle b/x-pack/plugin/ilm/build.gradle index 90e4c99bc6b..7fec7486bec 100644 --- a/x-pack/plugin/ilm/build.gradle +++ b/x-pack/plugin/ilm/build.gradle @@ -27,4 +27,3 @@ gradle.projectsEvaluated { } integTest.enabled = false - diff --git a/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/ilm/TimeSeriesLifecycleActionsIT.java b/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/ilm/TimeSeriesLifecycleActionsIT.java index 081ad1b5ec4..f9baedf1ae1 100644 --- a/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/ilm/TimeSeriesLifecycleActionsIT.java +++ b/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/ilm/TimeSeriesLifecycleActionsIT.java @@ -387,7 +387,8 @@ public class TimeSeriesLifecycleActionsIT extends ESRestTestCase { // index document so snapshot actually does something indexDocument(); // start snapshot - request = new Request("PUT", "/_snapshot/repo/snapshot"); + String snapName = "snapshot-" + randomAlphaOfLength(6).toLowerCase(Locale.ROOT); + request = new Request("PUT", "/_snapshot/repo/" + snapName); request.addParameter("wait_for_completion", "false"); request.setJsonEntity("{\"indices\": \"" + index + "\"}"); assertOK(client().performRequest(request)); @@ -396,10 +397,10 @@ public class TimeSeriesLifecycleActionsIT extends ESRestTestCase { // assert that index was deleted assertBusy(() -> assertFalse(indexExists(index)), 2, TimeUnit.MINUTES); // assert that snapshot is still in progress and clean up - assertThat(getSnapshotState("snapshot"), equalTo("SUCCESS")); - assertOK(client().performRequest(new Request("DELETE", "/_snapshot/repo/snapshot"))); + assertThat(getSnapshotState(snapName), equalTo("SUCCESS")); + assertOK(client().performRequest(new Request("DELETE", "/_snapshot/repo/" + snapName))); ResponseException e = expectThrows(ResponseException.class, - () -> client().performRequest(new Request("GET", "/_snapshot/repo/snapshot"))); + () -> client().performRequest(new Request("GET", "/_snapshot/repo/" + snapName))); assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(404)); } diff --git a/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleIT.java b/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleRestIT.java similarity index 57% rename from x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleIT.java rename to x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleRestIT.java index c9804b0df1d..21874386f55 100644 --- a/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleIT.java +++ 
b/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleRestIT.java @@ -7,6 +7,8 @@ package org.elasticsearch.xpack.slm; import org.apache.http.util.EntityUtils; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; @@ -14,6 +16,8 @@ import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; import org.elasticsearch.client.indexlifecycle.RolloverAction; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.ToXContent; @@ -23,9 +27,11 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.xpack.core.ilm.LifecycleSettings; import org.elasticsearch.xpack.core.ilm.Step; import org.elasticsearch.xpack.core.ilm.WaitForRolloverReadyStep; import org.elasticsearch.xpack.core.slm.SnapshotLifecyclePolicy; +import org.elasticsearch.xpack.core.slm.SnapshotRetentionConfiguration; import java.io.IOException; import java.io.InputStream; @@ -34,9 +40,11 @@ import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.Optional; +import java.util.concurrent.TimeUnit; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.xpack.core.slm.history.SnapshotHistoryItem.CREATE_OPERATION; +import static org.elasticsearch.xpack.core.slm.history.SnapshotHistoryItem.DELETE_OPERATION; import static org.elasticsearch.xpack.core.slm.history.SnapshotHistoryStore.SLM_HISTORY_INDEX_PREFIX; import static org.elasticsearch.xpack.ilm.TimeSeriesLifecycleActionsIT.getStepKeyForIndex; import static org.hamcrest.Matchers.containsString; @@ -45,7 +53,7 @@ import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.startsWith; -public class SnapshotLifecycleIT extends ESRestTestCase { +public class SnapshotLifecycleRestIT extends ESRestTestCase { @Override protected boolean waitForAllSnapshotsWiped() { @@ -53,10 +61,8 @@ public class SnapshotLifecycleIT extends ESRestTestCase { } public void testMissingRepo() throws Exception { - final String policyId = "test-policy"; - final String missingRepoName = "missing-repo"; - SnapshotLifecyclePolicy policy = new SnapshotLifecyclePolicy(policyId, "snap", - "*/1 * * * * ?", missingRepoName, Collections.emptyMap()); + SnapshotLifecyclePolicy policy = new SnapshotLifecyclePolicy("test-policy", "snap", + "*/1 * * * * ?", "missing-repo", Collections.emptyMap(), SnapshotRetentionConfiguration.EMPTY); Request putLifecycle = new Request("PUT", "/_slm/policy/test-policy"); XContentBuilder lifecycleBuilder = JsonXContent.contentBuilder(); @@ -82,7 +88,7 @@ public class SnapshotLifecycleIT extends ESRestTestCase { } // Create a snapshot repo - inializeRepo(repoId); + initializeRepo(repoId); createSnapshotPolicy(policyName, "snap", "*/1 * * * * ?", repoId, indexName, true); @@ 
-101,7 +107,7 @@ public class SnapshotLifecycleIT extends ESRestTestCase { Map metadata = (Map) snapResponse.get("metadata"); assertNotNull(metadata); assertThat(metadata.get("policy"), equalTo(policyName)); - assertHistoryIsPresent(policyName, true, repoId); + assertHistoryIsPresent(policyName, true, repoId, CREATE_OPERATION); // Check that the last success date was written to the cluster state Request getReq = new Request("GET", "/_slm/policy/" + policyName); @@ -122,7 +128,15 @@ public class SnapshotLifecycleIT extends ESRestTestCase { String lastSnapshotName = (String) lastSuccessObject.get("snapshot_name"); assertThat(lastSnapshotName, startsWith("snap-")); - assertHistoryIsPresent(policyName, true, repoId); + assertHistoryIsPresent(policyName, true, repoId, CREATE_OPERATION); + + Map stats = getSLMStats(); + Map policyStats = (Map) stats.get(SnapshotLifecycleStats.POLICY_STATS.getPreferredName()); + Map policyIdStats = (Map) policyStats.get(policyName); + int snapsTaken = (int) policyIdStats.get(SnapshotLifecycleStats.SnapshotPolicyStats.SNAPSHOTS_TAKEN.getPreferredName()); + int totalTaken = (int) stats.get(SnapshotLifecycleStats.TOTAL_TAKEN.getPreferredName()); + assertThat(snapsTaken, greaterThanOrEqualTo(1)); + assertThat(totalTaken, greaterThanOrEqualTo(1)); }); Request delReq = new Request("DELETE", "/_slm/policy/" + policyName); @@ -134,7 +148,7 @@ public class SnapshotLifecycleIT extends ESRestTestCase { final String policyName = "test-policy"; final String repoName = "test-repo"; final String indexPattern = "index-doesnt-exist"; - inializeRepo(repoName); + initializeRepo(repoName); // Create a policy with ignore_unvailable: false and an index that doesn't exist createSnapshotPolicy(policyName, "snap", "*/1 * * * * ?", repoName, indexPattern, false); @@ -163,10 +177,19 @@ public class SnapshotLifecycleIT extends ESRestTestCase { assertNotNull(snapshotName); assertThat(snapshotName, startsWith("snap-")); } - assertHistoryIsPresent(policyName, false, repoName); + assertHistoryIsPresent(policyName, false, repoName, CREATE_OPERATION); + + Map stats = getSLMStats(); + Map policyStats = (Map) stats.get(SnapshotLifecycleStats.POLICY_STATS.getPreferredName()); + Map policyIdStats = (Map) policyStats.get(policyName); + int snapsFailed = (int) policyIdStats.get(SnapshotLifecycleStats.SnapshotPolicyStats.SNAPSHOTS_FAILED.getPreferredName()); + int totalFailed = (int) stats.get(SnapshotLifecycleStats.TOTAL_FAILED.getPreferredName()); + assertThat(snapsFailed, greaterThanOrEqualTo(1)); + assertThat(totalFailed, greaterThanOrEqualTo(1)); }); } + @SuppressWarnings("unchecked") public void testPolicyManualExecution() throws Exception { final String indexName = "test"; final String policyName = "test-policy"; @@ -177,7 +200,7 @@ public class SnapshotLifecycleIT extends ESRestTestCase { } // Create a snapshot repo - inializeRepo(repoId); + initializeRepo(repoId); createSnapshotPolicy(policyName, "snap", "1 2 3 4 5 ?", repoId, indexName, true); @@ -186,82 +209,138 @@ public class SnapshotLifecycleIT extends ESRestTestCase { assertThat(EntityUtils.toString(badResp.getResponse().getEntity()), containsString("no such snapshot lifecycle policy [" + policyName + "-bad]")); - Response goodResp = client().performRequest(new Request("PUT", "/_slm/policy/" + policyName + "/_execute")); + final String snapshotName = executePolicy(policyName); - try (XContentParser parser = JsonXContent.jsonXContent.createParser(NamedXContentRegistry.EMPTY, - DeprecationHandler.THROW_UNSUPPORTED_OPERATION, 
EntityUtils.toByteArray(goodResp.getEntity()))) { - final String snapshotName = parser.mapStrings().get("snapshot_name"); - - // Check that the executed snapshot is created - assertBusy(() -> { - try { - Response response = client().performRequest(new Request("GET", "/_snapshot/" + repoId + "/" + snapshotName)); - Map snapshotResponseMap; - try (InputStream is = response.getEntity().getContent()) { - snapshotResponseMap = XContentHelper.convertToMap(XContentType.JSON.xContent(), is, true); - } - assertThat(snapshotResponseMap.size(), greaterThan(0)); - final Map metadata = extractMetadata(snapshotResponseMap, snapshotName); - assertNotNull(metadata); - assertThat(metadata.get("policy"), equalTo(policyName)); - assertHistoryIsPresent(policyName, true, repoId); - } catch (ResponseException e) { - fail("expected snapshot to exist but it does not: " + EntityUtils.toString(e.getResponse().getEntity())); + // Check that the executed snapshot is created + assertBusy(() -> { + try { + Response response = client().performRequest(new Request("GET", "/_snapshot/" + repoId + "/" + snapshotName)); + Map snapshotResponseMap; + try (InputStream is = response.getEntity().getContent()) { + snapshotResponseMap = XContentHelper.convertToMap(XContentType.JSON.xContent(), is, true); } - }); - } + assertThat(snapshotResponseMap.size(), greaterThan(0)); + final Map metadata = extractMetadata(snapshotResponseMap, snapshotName); + assertNotNull(metadata); + assertThat(metadata.get("policy"), equalTo(policyName)); + assertHistoryIsPresent(policyName, true, repoId, CREATE_OPERATION); + } catch (ResponseException e) { + fail("expected snapshot to exist but it does not: " + EntityUtils.toString(e.getResponse().getEntity())); + } + + Map stats = getSLMStats(); + Map policyStats = (Map) stats.get(SnapshotLifecycleStats.POLICY_STATS.getPreferredName()); + Map policyIdStats = (Map) policyStats.get(policyName); + int snapsTaken = (int) policyIdStats.get(SnapshotLifecycleStats.SnapshotPolicyStats.SNAPSHOTS_TAKEN.getPreferredName()); + int totalTaken = (int) stats.get(SnapshotLifecycleStats.TOTAL_TAKEN.getPreferredName()); + assertThat(snapsTaken, equalTo(1)); + assertThat(totalTaken, equalTo(1)); + }); } @SuppressWarnings("unchecked") - public void testSnapshotInProgress() throws Exception { + public void testBasicTimeBasedRetenion() throws Exception { final String indexName = "test"; final String policyName = "test-policy"; final String repoId = "my-repo"; - int docCount = 20; + int docCount = randomIntBetween(10, 50); + List indexReqs = new ArrayList<>(); for (int i = 0; i < docCount; i++) { index(client(), indexName, "" + i, "foo", "bar"); } // Create a snapshot repo - inializeRepo(repoId, 1); + initializeRepo(repoId); - createSnapshotPolicy(policyName, "snap", "1 2 3 4 5 ?", repoId, indexName, true); + // Create a policy with a retention period of 1 millisecond + createSnapshotPolicy(policyName, "snap", "1 2 3 4 5 ?", repoId, indexName, true, + new SnapshotRetentionConfiguration(TimeValue.timeValueMillis(1), null, null)); - Response executeRepsonse = client().performRequest(new Request("PUT", "/_slm/policy/" + policyName + "/_execute")); + // Manually create a snapshot + final String snapshotName = executePolicy(policyName); - try (XContentParser parser = JsonXContent.jsonXContent.createParser(NamedXContentRegistry.EMPTY, - DeprecationHandler.THROW_UNSUPPORTED_OPERATION, EntityUtils.toByteArray(executeRepsonse.getEntity()))) { - final String snapshotName = parser.mapStrings().get("snapshot_name"); - - // Check that the 
executed snapshot shows up in the SLM output - assertBusy(() -> { - try { - Response response = client().performRequest(new Request("GET", "/_slm/policy" + (randomBoolean() ? "" : "?human"))); - Map policyResponseMap; - try (InputStream content = response.getEntity().getContent()) { - policyResponseMap = XContentHelper.convertToMap(XContentType.JSON.xContent(), content, true); - } - assertThat(policyResponseMap.size(), greaterThan(0)); - Optional> inProgress = Optional.ofNullable((Map) policyResponseMap.get(policyName)) - .map(policy -> (Map) policy.get("in_progress")); - - if (inProgress.isPresent()) { - Map inProgressMap = inProgress.get(); - assertThat(inProgressMap.get("name"), equalTo(snapshotName)); - assertNotNull(inProgressMap.get("uuid")); - assertThat(inProgressMap.get("state"), equalTo("STARTED")); - assertThat((long) inProgressMap.get("start_time_millis"), greaterThan(0L)); - assertNull(inProgressMap.get("failure")); - } else { - fail("expected in_progress to contain a running snapshot, but the response was " + policyResponseMap); - } - } catch (ResponseException e) { - fail("expected policy to exist but it does not: " + EntityUtils.toString(e.getResponse().getEntity())); + // Check that the executed snapshot is created + assertBusy(() -> { + try { + Response response = client().performRequest(new Request("GET", "/_snapshot/" + repoId + "/" + snapshotName)); + Map snapshotResponseMap; + try (InputStream is = response.getEntity().getContent()) { + snapshotResponseMap = XContentHelper.convertToMap(XContentType.JSON.xContent(), is, true); } - }); + assertThat(snapshotResponseMap.size(), greaterThan(0)); + final Map metadata = extractMetadata(snapshotResponseMap, snapshotName); + assertNotNull(metadata); + assertThat(metadata.get("policy"), equalTo(policyName)); + assertHistoryIsPresent(policyName, true, repoId, CREATE_OPERATION); + } catch (ResponseException e) { + fail("expected snapshot to exist but it does not: " + EntityUtils.toString(e.getResponse().getEntity())); + } + }); - // Cancel the snapshot since it is not going to complete quickly - assertOK(client().performRequest(new Request("DELETE", "/_snapshot/" + repoId + "/" + snapshotName))); + // Run retention every second + ClusterUpdateSettingsRequest req = new ClusterUpdateSettingsRequest(); + req.transientSettings(Settings.builder().put(LifecycleSettings.SLM_RETENTION_SCHEDULE, "*/1 * * * * ?")); + try (XContentBuilder builder = jsonBuilder()) { + req.toXContent(builder, ToXContent.EMPTY_PARAMS); + Request r = new Request("PUT", "/_cluster/settings"); + r.setJsonEntity(Strings.toString(builder)); + Response updateSettingsResp = client().performRequest(r); + } + + try { + // Check that the snapshot created by the policy has been removed by retention + assertBusy(() -> { + // We expect a failed response because the snapshot should not exist + try { + logger.info("--> checking to see if snapshot has been deleted..."); + Response response = client().performRequest(new Request("GET", "/_snapshot/" + repoId + "/" + snapshotName)); + assertThat(EntityUtils.toString(response.getEntity()), containsString("snapshot_missing_exception")); + } catch (ResponseException e) { + assertThat(EntityUtils.toString(e.getResponse().getEntity()), containsString("snapshot_missing_exception")); + } + assertHistoryIsPresent(policyName, true, repoId, DELETE_OPERATION); + + Map stats = getSLMStats(); + Map policyStats = (Map) stats.get(SnapshotLifecycleStats.POLICY_STATS.getPreferredName()); + Map policyIdStats = (Map) policyStats.get(policyName); + 
int snapsTaken = (int) policyIdStats.get(SnapshotLifecycleStats.SnapshotPolicyStats.SNAPSHOTS_TAKEN.getPreferredName()); + int snapsDeleted = (int) policyIdStats.get(SnapshotLifecycleStats.SnapshotPolicyStats.SNAPSHOTS_DELETED.getPreferredName()); + int retentionRun = (int) stats.get(SnapshotLifecycleStats.RETENTION_RUNS.getPreferredName()); + int totalTaken = (int) stats.get(SnapshotLifecycleStats.TOTAL_TAKEN.getPreferredName()); + int totalDeleted = (int) stats.get(SnapshotLifecycleStats.TOTAL_DELETIONS.getPreferredName()); + assertThat(snapsTaken, equalTo(1)); + assertThat(totalTaken, equalTo(1)); + assertThat(retentionRun, greaterThanOrEqualTo(1)); + assertThat(snapsDeleted, greaterThanOrEqualTo(1)); + assertThat(totalDeleted, greaterThanOrEqualTo(1)); + }, 60, TimeUnit.SECONDS); + + } finally { + // Unset retention + ClusterUpdateSettingsRequest unsetRequest = new ClusterUpdateSettingsRequest(); + unsetRequest.transientSettings(Settings.builder().put(LifecycleSettings.SLM_RETENTION_SCHEDULE, (String) null)); + try (XContentBuilder builder = jsonBuilder()) { + unsetRequest.toXContent(builder, ToXContent.EMPTY_PARAMS); + Request r = new Request("PUT", "/_cluster/settings"); + r.setJsonEntity(Strings.toString(builder)); + client().performRequest(r); + } + } + } + + /** + * Execute the given policy and return the generated snapshot name + */ + private String executePolicy(String policyId) { + try { + Response executeRepsonse = client().performRequest(new Request("PUT", "/_slm/policy/" + policyId + "/_execute")); + try (XContentParser parser = JsonXContent.jsonXContent.createParser(NamedXContentRegistry.EMPTY, + DeprecationHandler.THROW_UNSUPPORTED_OPERATION, EntityUtils.toByteArray(executeRepsonse.getEntity()))) { + return parser.mapStrings().get("snapshot_name"); + } + } catch (Exception e) { + fail("failed to execute policy " + policyId + " - got: " + e); + throw new RuntimeException(e); } } @@ -275,8 +354,20 @@ public class SnapshotLifecycleIT extends ESRestTestCase { .orElse(null); } + private Map getSLMStats() { + try { + Response response = client().performRequest(new Request("GET", "/_slm/stats")); + try (InputStream content = response.getEntity().getContent()) { + return XContentHelper.convertToMap(XContentType.JSON.xContent(), content, true); + } + } catch (Exception e) { + fail("exception retrieving stats: " + e); + throw new ElasticsearchException(e); + } + } + // This method should be called inside an assertBusy, it has no retry logic of its own - private void assertHistoryIsPresent(String policyName, boolean success, String repository) throws IOException { + private void assertHistoryIsPresent(String policyName, boolean success, String repository, String operation) throws IOException { final Request historySearchRequest = new Request("GET", ".slm-history*/_search"); historySearchRequest.setJsonEntity("{\n" + " \"query\": {\n" + @@ -299,7 +390,7 @@ public class SnapshotLifecycleIT extends ESRestTestCase { " },\n" + " {\n" + " \"term\": {\n" + - " \"operation\": \"CREATE\"\n" + + " \"operation\": \"" + operation + "\"\n" + " }\n" + " }\n" + " ]\n" + @@ -334,6 +425,13 @@ public class SnapshotLifecycleIT extends ESRestTestCase { private void createSnapshotPolicy(String policyName, String snapshotNamePattern, String schedule, String repoId, String indexPattern, boolean ignoreUnavailable) throws IOException { + createSnapshotPolicy(policyName, snapshotNamePattern, schedule, repoId, indexPattern, + ignoreUnavailable, SnapshotRetentionConfiguration.EMPTY); + } + + private void 
createSnapshotPolicy(String policyName, String snapshotNamePattern, String schedule, String repoId, + String indexPattern, boolean ignoreUnavailable, + SnapshotRetentionConfiguration retention) throws IOException { Map snapConfig = new HashMap<>(); snapConfig.put("indices", Collections.singletonList(indexPattern)); snapConfig.put("ignore_unavailable", ignoreUnavailable); @@ -345,7 +443,8 @@ public class SnapshotLifecycleIT extends ESRestTestCase { () -> randomAlphaOfLength(5)), randomAlphaOfLength(4)); } } - SnapshotLifecyclePolicy policy = new SnapshotLifecyclePolicy(policyName, snapshotNamePattern, schedule, repoId, snapConfig); + SnapshotLifecyclePolicy policy = new SnapshotLifecyclePolicy(policyName, snapshotNamePattern, schedule, + repoId, snapConfig, retention); Request putLifecycle = new Request("PUT", "/_slm/policy/" + policyName); XContentBuilder lifecycleBuilder = JsonXContent.contentBuilder(); @@ -354,11 +453,11 @@ public class SnapshotLifecycleIT extends ESRestTestCase { assertOK(client().performRequest(putLifecycle)); } - private void inializeRepo(String repoName) throws IOException { - inializeRepo(repoName, 256); + private void initializeRepo(String repoName) throws IOException { + initializeRepo(repoName, "40mb"); } - private void inializeRepo(String repoName, int maxBytesPerSecond) throws IOException { + private void initializeRepo(String repoName, String maxBytesPerSecond) throws IOException { Request request = new Request("PUT", "/_snapshot/" + repoName); request.setJsonEntity(Strings .toString(JsonXContent.contentBuilder() @@ -367,7 +466,7 @@ public class SnapshotLifecycleIT extends ESRestTestCase { .startObject("settings") .field("compress", randomBoolean()) .field("location", System.getProperty("tests.path.repo")) - .field("max_snapshot_bytes_per_sec", maxBytesPerSecond + "b") + .field("max_snapshot_bytes_per_sec", maxBytesPerSecond) .endObject() .endObject())); assertOK(client().performRequest(request)); diff --git a/x-pack/plugin/ilm/qa/with-security/src/test/java/org/elasticsearch/xpack/security/PermissionsIT.java b/x-pack/plugin/ilm/qa/with-security/src/test/java/org/elasticsearch/xpack/security/PermissionsIT.java index 9c131defa83..62c140bce79 100644 --- a/x-pack/plugin/ilm/qa/with-security/src/test/java/org/elasticsearch/xpack/security/PermissionsIT.java +++ b/x-pack/plugin/ilm/qa/with-security/src/test/java/org/elasticsearch/xpack/security/PermissionsIT.java @@ -25,6 +25,7 @@ import org.elasticsearch.client.slm.ExecuteSnapshotLifecyclePolicyResponse; import org.elasticsearch.client.slm.GetSnapshotLifecyclePolicyRequest; import org.elasticsearch.client.slm.PutSnapshotLifecyclePolicyRequest; import org.elasticsearch.client.slm.SnapshotLifecyclePolicy; +import org.elasticsearch.client.slm.SnapshotRetentionConfiguration; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; @@ -189,7 +190,7 @@ public class PermissionsIT extends ESRestTestCase { Map config = new HashMap<>(); config.put("indices", Collections.singletonList("index")); SnapshotLifecyclePolicy policy = new SnapshotLifecyclePolicy( - "policy_id", "name", "1 2 3 * * ?", "my_repository", config); + "policy_id", "name", "1 2 3 * * ?", "my_repository", config, SnapshotRetentionConfiguration.EMPTY); PutSnapshotLifecyclePolicyRequest request = new PutSnapshotLifecyclePolicyRequest(policy); expectThrows(ElasticsearchStatusException.class, diff --git 
a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycle.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycle.java index 25ee9351780..2fc2033ab44 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycle.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycle.java @@ -65,6 +65,7 @@ import org.elasticsearch.xpack.core.slm.SnapshotLifecycleMetadata; import org.elasticsearch.xpack.core.slm.action.DeleteSnapshotLifecycleAction; import org.elasticsearch.xpack.core.slm.action.ExecuteSnapshotLifecycleAction; import org.elasticsearch.xpack.core.slm.action.GetSnapshotLifecycleAction; +import org.elasticsearch.xpack.core.slm.action.GetSnapshotLifecycleStatsAction; import org.elasticsearch.xpack.core.slm.action.PutSnapshotLifecycleAction; import org.elasticsearch.xpack.core.slm.history.SnapshotHistoryStore; import org.elasticsearch.xpack.core.slm.history.SnapshotLifecycleTemplateRegistry; @@ -90,13 +91,17 @@ import org.elasticsearch.xpack.ilm.action.TransportStartILMAction; import org.elasticsearch.xpack.ilm.action.TransportStopILMAction; import org.elasticsearch.xpack.slm.SnapshotLifecycleService; import org.elasticsearch.xpack.slm.SnapshotLifecycleTask; +import org.elasticsearch.xpack.slm.SnapshotRetentionService; +import org.elasticsearch.xpack.slm.SnapshotRetentionTask; import org.elasticsearch.xpack.slm.action.RestDeleteSnapshotLifecycleAction; import org.elasticsearch.xpack.slm.action.RestExecuteSnapshotLifecycleAction; import org.elasticsearch.xpack.slm.action.RestGetSnapshotLifecycleAction; +import org.elasticsearch.xpack.slm.action.RestGetSnapshotLifecycleStatsAction; import org.elasticsearch.xpack.slm.action.RestPutSnapshotLifecycleAction; import org.elasticsearch.xpack.slm.action.TransportDeleteSnapshotLifecycleAction; import org.elasticsearch.xpack.slm.action.TransportExecuteSnapshotLifecycleAction; import org.elasticsearch.xpack.slm.action.TransportGetSnapshotLifecycleAction; +import org.elasticsearch.xpack.slm.action.TransportGetSnapshotLifecycleStatsAction; import org.elasticsearch.xpack.slm.action.TransportPutSnapshotLifecycleAction; import java.io.IOException; @@ -108,20 +113,22 @@ import java.util.Collections; import java.util.List; import java.util.function.Supplier; -import static java.util.Collections.emptyList; import static org.elasticsearch.xpack.core.ClientHelper.INDEX_LIFECYCLE_ORIGIN; public class IndexLifecycle extends Plugin implements ActionPlugin { private final SetOnce indexLifecycleInitialisationService = new SetOnce<>(); private final SetOnce snapshotLifecycleService = new SetOnce<>(); + private final SetOnce snapshotRetentionService = new SetOnce<>(); private final SetOnce snapshotHistoryStore = new SetOnce<>(); private Settings settings; - private boolean enabled; + private boolean ilmEnabled; + private boolean slmEnabled; private boolean transportClientMode; public IndexLifecycle(Settings settings) { this.settings = settings; - this.enabled = XPackSettings.INDEX_LIFECYCLE_ENABLED.get(settings); + this.ilmEnabled = XPackSettings.INDEX_LIFECYCLE_ENABLED.get(settings); + this.slmEnabled = XPackSettings.SNAPSHOT_LIFECYCLE_ENABLED.get(settings); this.transportClientMode = XPackPlugin.transportClientMode(settings); } @@ -147,9 +154,11 @@ public class IndexLifecycle extends Plugin implements ActionPlugin { return Arrays.asList( LifecycleSettings.LIFECYCLE_POLL_INTERVAL_SETTING, LifecycleSettings.LIFECYCLE_NAME_SETTING, + 
LifecycleSettings.LIFECYCLE_ORIGINATION_DATE_SETTING, LifecycleSettings.LIFECYCLE_INDEXING_COMPLETE_SETTING, RolloverAction.LIFECYCLE_ROLLOVER_ALIAS_SETTING, - LifecycleSettings.SLM_HISTORY_INDEX_ENABLED_SETTING); + LifecycleSettings.SLM_HISTORY_INDEX_ENABLED_SETTING, + LifecycleSettings.SLM_RETENTION_SCHEDULE_SETTING); } @Override @@ -157,18 +166,28 @@ public class IndexLifecycle extends Plugin implements ActionPlugin { ResourceWatcherService resourceWatcherService, ScriptService scriptService, NamedXContentRegistry xContentRegistry, Environment environment, NodeEnvironment nodeEnvironment, NamedWriteableRegistry namedWriteableRegistry) { - if (enabled == false || transportClientMode) { - return emptyList(); + if (transportClientMode) { + return Collections.emptyList(); } - indexLifecycleInitialisationService.set(new IndexLifecycleService(settings, client, clusterService, threadPool, + final List components = new ArrayList<>(); + if (ilmEnabled) { + indexLifecycleInitialisationService.set(new IndexLifecycleService(settings, client, clusterService, threadPool, getClock(), System::currentTimeMillis, xContentRegistry)); - SnapshotLifecycleTemplateRegistry templateRegistry = new SnapshotLifecycleTemplateRegistry(settings, clusterService, threadPool, - client, xContentRegistry); - snapshotHistoryStore.set(new SnapshotHistoryStore(settings, new OriginSettingClient(client, INDEX_LIFECYCLE_ORIGIN), clusterService - )); - snapshotLifecycleService.set(new SnapshotLifecycleService(settings, - () -> new SnapshotLifecycleTask(client, clusterService, snapshotHistoryStore.get()), clusterService, getClock())); - return Arrays.asList(indexLifecycleInitialisationService.get(), snapshotLifecycleService.get(), snapshotHistoryStore.get()); + components.add(indexLifecycleInitialisationService.get()); + } + if (slmEnabled) { + SnapshotLifecycleTemplateRegistry templateRegistry = new SnapshotLifecycleTemplateRegistry(settings, clusterService, threadPool, + client, xContentRegistry); + snapshotHistoryStore.set(new SnapshotHistoryStore(settings, new OriginSettingClient(client, INDEX_LIFECYCLE_ORIGIN), + clusterService)); + snapshotLifecycleService.set(new SnapshotLifecycleService(settings, + () -> new SnapshotLifecycleTask(client, clusterService, snapshotHistoryStore.get()), clusterService, getClock())); + snapshotRetentionService.set(new SnapshotRetentionService(settings, + () -> new SnapshotRetentionTask(client, clusterService, System::nanoTime, snapshotHistoryStore.get(), threadPool), + clusterService, getClock())); + components.addAll(Arrays.asList(snapshotLifecycleService.get(), snapshotHistoryStore.get(), snapshotRetentionService.get())); + } + return components; } @Override @@ -204,10 +223,9 @@ public class IndexLifecycle extends Plugin implements ActionPlugin { public List getRestHandlers(Settings settings, RestController restController, ClusterSettings clusterSettings, IndexScopedSettings indexScopedSettings, SettingsFilter settingsFilter, IndexNameExpressionResolver indexNameExpressionResolver, Supplier nodesInCluster) { - if (enabled == false) { - return emptyList(); - } - return Arrays.asList( + List handlers = new ArrayList<>(); + if (ilmEnabled) { + handlers.addAll(Arrays.asList( new RestPutLifecycleAction(restController), new RestGetLifecycleAction(restController), new RestDeleteLifecycleAction(restController), @@ -217,21 +235,26 @@ public class IndexLifecycle extends Plugin implements ActionPlugin { new RestRetryAction(restController), new RestStopAction(restController), new 
RestStartILMAction(restController), - new RestGetStatusAction(restController), - // Snapshot lifecycle actions + new RestGetStatusAction(restController) + )); + } + if (slmEnabled) { + handlers.addAll(Arrays.asList( new RestPutSnapshotLifecycleAction(restController), new RestDeleteSnapshotLifecycleAction(restController), new RestGetSnapshotLifecycleAction(restController), - new RestExecuteSnapshotLifecycleAction(restController) - ); + new RestExecuteSnapshotLifecycleAction(restController), + new RestGetSnapshotLifecycleStatsAction(restController) + )); + } + return handlers; } @Override public List> getActions() { - if (enabled == false) { - return emptyList(); - } - return Arrays.asList( + List> actions = new ArrayList<>(); + if (ilmEnabled) { + actions.addAll(Arrays.asList( new ActionHandler<>(PutLifecycleAction.INSTANCE, TransportPutLifecycleAction.class), new ActionHandler<>(GetLifecycleAction.INSTANCE, TransportGetLifecycleAction.class), new ActionHandler<>(DeleteLifecycleAction.INSTANCE, TransportDeleteLifecycleAction.class), @@ -241,18 +264,25 @@ public class IndexLifecycle extends Plugin implements ActionPlugin { new ActionHandler<>(RetryAction.INSTANCE, TransportRetryAction.class), new ActionHandler<>(StartILMAction.INSTANCE, TransportStartILMAction.class), new ActionHandler<>(StopILMAction.INSTANCE, TransportStopILMAction.class), - new ActionHandler<>(GetStatusAction.INSTANCE, TransportGetStatusAction.class), - // Snapshot lifecycle actions + new ActionHandler<>(GetStatusAction.INSTANCE, TransportGetStatusAction.class) + )); + } + if (slmEnabled) { + actions.addAll(Arrays.asList( new ActionHandler<>(PutSnapshotLifecycleAction.INSTANCE, TransportPutSnapshotLifecycleAction.class), new ActionHandler<>(DeleteSnapshotLifecycleAction.INSTANCE, TransportDeleteSnapshotLifecycleAction.class), new ActionHandler<>(GetSnapshotLifecycleAction.INSTANCE, TransportGetSnapshotLifecycleAction.class), - new ActionHandler<>(ExecuteSnapshotLifecycleAction.INSTANCE, TransportExecuteSnapshotLifecycleAction.class)); + new ActionHandler<>(ExecuteSnapshotLifecycleAction.INSTANCE, TransportExecuteSnapshotLifecycleAction.class), + new ActionHandler<>(GetSnapshotLifecycleStatsAction.INSTANCE, TransportGetSnapshotLifecycleStatsAction.class) + )); + } + return actions; } @Override public void close() { try { - IOUtils.close(indexLifecycleInitialisationService.get(), snapshotLifecycleService.get()); + IOUtils.close(indexLifecycleInitialisationService.get(), snapshotLifecycleService.get(), snapshotRetentionService.get()); } catch (IOException e) { throw new ElasticsearchException("unable to close index lifecycle services", e); } diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleRunner.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleRunner.java index 81e49da25e6..0e578315d31 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleRunner.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleRunner.java @@ -50,6 +50,7 @@ import java.util.function.LongSupplier; import static org.elasticsearch.ElasticsearchException.REST_EXCEPTION_SKIP_STACK_TRACE; import static org.elasticsearch.xpack.core.ilm.LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY; +import static org.elasticsearch.xpack.core.ilm.LifecycleSettings.LIFECYCLE_ORIGINATION_DATE; public class IndexLifecycleRunner { private static final Logger logger = LogManager.getLogger(IndexLifecycleRunner.class); @@ -73,11 +74,12 @@ public class 
IndexLifecycleRunner { */ boolean isReadyToTransitionToThisPhase(final String policy, final IndexMetaData indexMetaData, final String phase) { LifecycleExecutionState lifecycleState = LifecycleExecutionState.fromIndexMetadata(indexMetaData); - if (lifecycleState.getLifecycleDate() == null) { - logger.trace("no index creation date has been set yet"); + Long originationDate = indexMetaData.getSettings().getAsLong(LIFECYCLE_ORIGINATION_DATE, -1L); + if (lifecycleState.getLifecycleDate() == null && originationDate == -1L) { + logger.trace("no index creation or origination date has been set yet"); return true; } - final Long lifecycleDate = lifecycleState.getLifecycleDate(); + final Long lifecycleDate = originationDate != -1L ? originationDate : lifecycleState.getLifecycleDate(); assert lifecycleDate != null && lifecycleDate >= 0 : "expected index to have a lifecycle date but it did not"; final TimeValue after = stepRegistry.getIndexAgeForPhase(policy, phase); final long now = nowSupplier.getAsLong(); diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/OperationModeUpdateTask.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/OperationModeUpdateTask.java index c899a51c28f..53d4a5307b0 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/OperationModeUpdateTask.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/OperationModeUpdateTask.java @@ -74,7 +74,8 @@ public class OperationModeUpdateTask extends ClusterStateUpdateTask { return ClusterState.builder(currentState) .metaData(MetaData.builder(currentState.metaData()) .putCustom(SnapshotLifecycleMetadata.TYPE, - new SnapshotLifecycleMetadata(currentMetadata.getSnapshotConfigurations(), newMode))) + new SnapshotLifecycleMetadata(currentMetadata.getSnapshotConfigurations(), + newMode, currentMetadata.getStats()))) .build(); } diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportExplainLifecycleAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportExplainLifecycleAction.java index af347a6c56c..e8bd385f171 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportExplainLifecycleAction.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportExplainLifecycleAction.java @@ -41,6 +41,8 @@ import java.io.IOException; import java.util.HashMap; import java.util.Map; +import static org.elasticsearch.xpack.core.ilm.LifecycleSettings.LIFECYCLE_ORIGINATION_DATE; + public class TransportExplainLifecycleAction extends TransportClusterInfoAction { @@ -107,8 +109,9 @@ public class TransportExplainLifecycleAction // If this is requesting only errors, only include indices in the error step or which are using a nonexistent policy if (request.onlyErrors() == false || (ErrorStep.NAME.equals(lifecycleState.getStep()) || indexLifecycleService.policyExists(policyName) == false)) { + Long originationDate = idxSettings.getAsLong(LIFECYCLE_ORIGINATION_DATE, -1L); indexResponse = IndexLifecycleExplainResponse.newManagedIndexResponse(index, policyName, - lifecycleState.getLifecycleDate(), + originationDate != -1L ? 
originationDate : lifecycleState.getLifecycleDate(), lifecycleState.getPhase(), lifecycleState.getAction(), lifecycleState.getStep(), diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleService.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleService.java index d2435324ea5..0d27584d83e 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleService.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleService.java @@ -104,7 +104,7 @@ public class SnapshotLifecycleService implements LocalNodeMasterListener, Closea /** * Returns true if ILM is in the stopped or stopped state */ - private static boolean ilmStoppedOrStopping(ClusterState state) { + static boolean ilmStoppedOrStopping(ClusterState state) { return Optional.ofNullable((SnapshotLifecycleMetadata) state.metaData().custom(SnapshotLifecycleMetadata.TYPE)) .map(SnapshotLifecycleMetadata::getOperationMode) .map(mode -> OperationMode.STOPPING == mode || OperationMode.STOPPED == mode) diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTask.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTask.java index 7df325fb16a..4c740f42786 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTask.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTask.java @@ -94,7 +94,8 @@ public class SnapshotLifecycleTask implements SchedulerEngine.Listener { final long timestamp = Instant.now().toEpochMilli(); clusterService.submitStateUpdateTask("slm-record-success-" + policyMetadata.getPolicy().getId(), WriteJobStatus.success(policyMetadata.getPolicy().getId(), request.snapshot(), timestamp)); - historyStore.putAsync(SnapshotHistoryItem.successRecord(timestamp, policyMetadata.getPolicy(), request.snapshot())); + historyStore.putAsync(SnapshotHistoryItem.creationSuccessRecord(timestamp, policyMetadata.getPolicy(), + request.snapshot())); } @Override @@ -106,7 +107,8 @@ public class SnapshotLifecycleTask implements SchedulerEngine.Listener { WriteJobStatus.failure(policyMetadata.getPolicy().getId(), request.snapshot(), timestamp, e)); final SnapshotHistoryItem failureRecord; try { - failureRecord = SnapshotHistoryItem.failureRecord(timestamp, policyMetadata.getPolicy(), request.snapshot(), e); + failureRecord = SnapshotHistoryItem.creationFailureRecord(timestamp, policyMetadata.getPolicy(), + request.snapshot(), e); historyStore.putAsync(failureRecord); } catch (IOException ex) { // This shouldn't happen unless there's an issue with serializing the original exception, which shouldn't happen @@ -192,15 +194,19 @@ public class SnapshotLifecycleTask implements SchedulerEngine.Listener { } SnapshotLifecyclePolicyMetadata.Builder newPolicyMetadata = SnapshotLifecyclePolicyMetadata.builder(policyMetadata); + final SnapshotLifecycleStats stats = snapMeta.getStats(); if (exception.isPresent()) { + stats.snapshotFailed(policyName); newPolicyMetadata.setLastFailure(new SnapshotInvocationRecord(snapshotName, timestamp, exceptionToString())); } else { + stats.snapshotTaken(policyName); newPolicyMetadata.setLastSuccess(new SnapshotInvocationRecord(snapshotName, timestamp, null)); } snapLifecycles.put(policyName, newPolicyMetadata.build()); - SnapshotLifecycleMetadata lifecycleMetadata = new SnapshotLifecycleMetadata(snapLifecycles, snapMeta.getOperationMode()); + SnapshotLifecycleMetadata 
lifecycleMetadata = new SnapshotLifecycleMetadata(snapLifecycles, + snapMeta.getOperationMode(), stats); MetaData currentMeta = currentState.metaData(); return ClusterState.builder(currentState) .metaData(MetaData.builder(currentMeta) diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/SnapshotRetentionService.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/SnapshotRetentionService.java new file mode 100644 index 00000000000..36a60ffdf93 --- /dev/null +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/SnapshotRetentionService.java @@ -0,0 +1,103 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.slm; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.cluster.LocalNodeMasterListener; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.ilm.LifecycleSettings; +import org.elasticsearch.xpack.core.scheduler.CronSchedule; +import org.elasticsearch.xpack.core.scheduler.SchedulerEngine; +import org.elasticsearch.xpack.core.slm.SnapshotLifecyclePolicy; + +import java.io.Closeable; +import java.time.Clock; +import java.util.function.Supplier; + +/** + * The {@code SnapshotRetentionService} is responsible for scheduling the periodic kickoff of SLM's + * snapshot retention. This means that when the retention schedule setting is configured, the + * scheduler schedules a job that, when triggered, will delete snapshots according to the retention + * policy configured in the {@link SnapshotLifecyclePolicy}. 
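The javadoc above describes a service that registers one cron-driven retention job and re-registers it whenever the retention schedule setting (LifecycleSettings.SLM_RETENTION_SCHEDULE_SETTING) changes. A minimal, self-contained sketch of that cancel-and-reschedule pattern, not part of the patch, using only the JDK scheduler with a fixed period standing in for a cron schedule:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;

// Illustrative sketch only: shows the cancel-and-reschedule pattern applied when the
// retention schedule setting is updated. Class and method names are made up.
class RetentionSchedulingSketch {
    private final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
    private ScheduledFuture<?> retentionJob; // the single registered job, if any

    // Called whenever the (hypothetical) schedule setting changes; a non-positive
    // period stands in for an empty cron expression, i.e. "retention disabled".
    synchronized void setSchedule(long periodMillis, Runnable retentionTask) {
        if (retentionJob != null) {
            retentionJob.cancel(false); // drop the previously scheduled job
            retentionJob = null;
        }
        if (periodMillis > 0) {
            retentionJob = scheduler.scheduleAtFixedRate(retentionTask, periodMillis, periodMillis, TimeUnit.MILLISECONDS);
        }
    }
}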
+ */ +public class SnapshotRetentionService implements LocalNodeMasterListener, Closeable { + + static final String SLM_RETENTION_JOB_ID = "slm-retention-job"; + + private static final Logger logger = LogManager.getLogger(SnapshotRetentionService.class); + + private final SchedulerEngine scheduler; + + private volatile String slmRetentionSchedule; + private volatile boolean isMaster = false; + + public SnapshotRetentionService(Settings settings, + Supplier taskSupplier, + ClusterService clusterService, + Clock clock) { + this.scheduler = new SchedulerEngine(settings, clock); + this.scheduler.register(taskSupplier.get()); + this.slmRetentionSchedule = LifecycleSettings.SLM_RETENTION_SCHEDULE_SETTING.get(settings); + clusterService.addLocalNodeMasterListener(this); + clusterService.getClusterSettings().addSettingsUpdateConsumer(LifecycleSettings.SLM_RETENTION_SCHEDULE_SETTING, + this::setUpdateSchedule); + } + + void setUpdateSchedule(String retentionSchedule) { + this.slmRetentionSchedule = retentionSchedule; + // The schedule has changed, so reschedule the retention job + rescheduleRetentionJob(); + } + + // Only used for testing + SchedulerEngine getScheduler() { + return this.scheduler; + } + + @Override + public void onMaster() { + this.isMaster = true; + rescheduleRetentionJob(); + } + + @Override + public void offMaster() { + this.isMaster = false; + cancelRetentionJob(); + } + + private void rescheduleRetentionJob() { + final String schedule = this.slmRetentionSchedule; + if (this.isMaster && Strings.hasText(schedule)) { + final SchedulerEngine.Job retentionJob = new SchedulerEngine.Job(SLM_RETENTION_JOB_ID, + new CronSchedule(schedule)); + logger.debug("scheduling SLM retention job for [{}]", schedule); + this.scheduler.add(retentionJob); + } else { + // The schedule has been unset, so cancel the scheduled retention job + cancelRetentionJob(); + } + } + + private void cancelRetentionJob() { + this.scheduler.scheduledJobIds().forEach(this.scheduler::remove); + } + + @Override + public String executorName() { + return ThreadPool.Names.SNAPSHOT; + } + + @Override + public void close() { + this.scheduler.stop(); + } +} diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/SnapshotRetentionTask.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/SnapshotRetentionTask.java new file mode 100644 index 00000000000..368dbcae678 --- /dev/null +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/SnapshotRetentionTask.java @@ -0,0 +1,506 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.slm; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.LatchedActionListener; +import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.client.OriginSettingClient; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateObserver; +import org.elasticsearch.cluster.RepositoryCleanupInProgress; +import org.elasticsearch.cluster.RestoreInProgress; +import org.elasticsearch.cluster.SnapshotDeletionsInProgress; +import org.elasticsearch.cluster.SnapshotsInProgress; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.CountDown; +import org.elasticsearch.snapshots.SnapshotId; +import org.elasticsearch.snapshots.SnapshotInfo; +import org.elasticsearch.snapshots.SnapshotState; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.ClientHelper; +import org.elasticsearch.xpack.core.ilm.LifecycleSettings; +import org.elasticsearch.xpack.core.scheduler.SchedulerEngine; +import org.elasticsearch.xpack.core.slm.SnapshotLifecycleMetadata; +import org.elasticsearch.xpack.core.slm.SnapshotLifecyclePolicy; +import org.elasticsearch.xpack.core.slm.SnapshotRetentionConfiguration; +import org.elasticsearch.xpack.core.slm.history.SnapshotHistoryItem; +import org.elasticsearch.xpack.core.slm.history.SnapshotHistoryStore; + +import java.io.IOException; +import java.time.Instant; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Consumer; +import java.util.function.LongSupplier; +import java.util.stream.Collectors; + +import static org.elasticsearch.xpack.core.slm.SnapshotLifecyclePolicy.POLICY_ID_METADATA_FIELD; + +/** + * The {@code SnapshotRetentionTask} is invoked by the scheduled job from the + * {@link SnapshotRetentionService}. It is responsible for retrieving the snapshots for repositories + * that have an SLM policy configured, and then deleting the snapshots that fall outside the + * retention policy. 
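The javadoc above summarizes the retention pass: collect the snapshots for each repository that an SLM policy targets, then delete the ones that fall outside retention. A tiny standalone analogue of the selection step, with plain strings standing in for SnapshotInfo and a Predicate standing in for the policy's deletion predicate (illustrative only, not the Elasticsearch API):

import java.util.List;
import java.util.Map;
import java.util.function.Predicate;
import java.util.stream.Collectors;

// Toy model of the retention pass: group snapshots by repository, then keep only the
// ones the retention predicate marks as deletable. All names are illustrative.
class RetentionSelectionSketch {
    static Map<String, List<String>> selectForDeletion(Map<String, List<String>> snapshotsByRepo,
                                                       Predicate<String> eligibleForDeletion) {
        return snapshotsByRepo.entrySet().stream()
            .collect(Collectors.toMap(Map.Entry::getKey,
                e -> e.getValue().stream().filter(eligibleForDeletion).collect(Collectors.toList())));
    }

    public static void main(String[] args) {
        Map<String, List<String>> byRepo = Map.of("my-repo", List.of("snap-2019-01-01", "snap-2019-09-01"));
        // The real task derives this predicate from the policy's retention configuration; here it is hard-coded.
        System.out.println(selectForDeletion(byRepo, name -> name.contains("2019-01")));
    }
}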
+ */ +public class SnapshotRetentionTask implements SchedulerEngine.Listener { + + private static final Logger logger = LogManager.getLogger(SnapshotRetentionTask.class); + private static final AtomicBoolean running = new AtomicBoolean(false); + + private final Client client; + private final ClusterService clusterService; + private final LongSupplier nowNanoSupplier; + private final ThreadPool threadPool; + private final SnapshotHistoryStore historyStore; + + public SnapshotRetentionTask(Client client, ClusterService clusterService, LongSupplier nowNanoSupplier, + SnapshotHistoryStore historyStore, ThreadPool threadPool) { + this.client = new OriginSettingClient(client, ClientHelper.INDEX_LIFECYCLE_ORIGIN); + this.clusterService = clusterService; + this.nowNanoSupplier = nowNanoSupplier; + this.historyStore = historyStore; + this.threadPool = threadPool; + } + + @Override + public void triggered(SchedulerEngine.Event event) { + assert event.getJobName().equals(SnapshotRetentionService.SLM_RETENTION_JOB_ID) : + "expected id to be " + SnapshotRetentionService.SLM_RETENTION_JOB_ID + " but it was " + event.getJobName(); + + final ClusterState state = clusterService.state(); + if (SnapshotLifecycleService.ilmStoppedOrStopping(state)) { + logger.debug("skipping SLM retention as ILM is currently stopped or stopping"); + return; + } + + if (running.compareAndSet(false, true)) { + final SnapshotLifecycleStats slmStats = new SnapshotLifecycleStats(); + + // Defined here so it can be re-used without having to repeat it + final Consumer failureHandler = e -> { + try { + logger.error("error during snapshot retention task", e); + slmStats.retentionFailed(); + updateStateWithStats(slmStats); + } finally { + running.set(false); + } + }; + + try { + final TimeValue maxDeletionTime = LifecycleSettings.SLM_RETENTION_DURATION_SETTING.get(state.metaData().settings()); + + logger.info("starting SLM retention snapshot cleanup task"); + slmStats.retentionRun(); + // Find all SLM policies that have retention enabled + final Map policiesWithRetention = getAllPoliciesWithRetentionEnabled(state); + + // For those policies (there may be more than one for the same repo), + // return the repos that we need to get the snapshots for + final Set repositioriesToFetch = policiesWithRetention.values().stream() + .map(SnapshotLifecyclePolicy::getRepository) + .collect(Collectors.toSet()); + + if (repositioriesToFetch.isEmpty()) { + running.set(false); + return; + } + + // Finally, asynchronously retrieve all the snapshots, deleting them serially, + // before updating the cluster state with the new metrics and setting 'running' + // back to false + getAllSuccessfulSnapshots(repositioriesToFetch, new ActionListener>>() { + @Override + public void onResponse(Map> allSnapshots) { + try { + // Find all the snapshots that are past their retention date + final Map> snapshotsToBeDeleted = allSnapshots.entrySet().stream() + .collect(Collectors.toMap(Map.Entry::getKey, + e -> e.getValue().stream() + .filter(snapshot -> snapshotEligibleForDeletion(snapshot, allSnapshots, policiesWithRetention)) + .collect(Collectors.toList()))); + + // Finally, delete the snapshots that need to be deleted + maybeDeleteSnapshots(snapshotsToBeDeleted, maxDeletionTime, slmStats); + + updateStateWithStats(slmStats); + } finally { + running.set(false); + } + } + + @Override + public void onFailure(Exception e) { + failureHandler.accept(e); + } + }, failureHandler); + } catch (Exception e) { + failureHandler.accept(e); + } + } else { + logger.trace("snapshot 
lifecycle retention task started, but a task is already running, skipping"); + } + } + + static Map getAllPoliciesWithRetentionEnabled(final ClusterState state) { + final SnapshotLifecycleMetadata snapMeta = state.metaData().custom(SnapshotLifecycleMetadata.TYPE); + if (snapMeta == null) { + return Collections.emptyMap(); + } + return snapMeta.getSnapshotConfigurations().entrySet().stream() + .filter(e -> e.getValue().getPolicy().getRetentionPolicy() != null) + .filter(e -> e.getValue().getPolicy().getRetentionPolicy().equals(SnapshotRetentionConfiguration.EMPTY) == false) + .collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().getPolicy())); + } + + static boolean snapshotEligibleForDeletion(SnapshotInfo snapshot, Map> allSnapshots, + Map policies) { + if (snapshot.userMetadata() == null) { + // This snapshot has no metadata, it is not eligible for deletion + return false; + } + + final String policyId; + try { + policyId = (String) snapshot.userMetadata().get(POLICY_ID_METADATA_FIELD); + } catch (Exception e) { + logger.debug("unable to retrieve policy id from snapshot metadata [" + snapshot.userMetadata() + "]", e); + return false; + } + + if (policyId == null) { + // policyId was null in the metadata, so it's not eligible + return false; + } + + SnapshotLifecyclePolicy policy = policies.get(policyId); + if (policy == null) { + // This snapshot was taking by a policy that doesn't exist, so it's not eligible + return false; + } + + SnapshotRetentionConfiguration retention = policy.getRetentionPolicy(); + if (retention == null || retention.equals(SnapshotRetentionConfiguration.EMPTY)) { + // Retention is not configured + return false; + } + + final String repository = policy.getRepository(); + // Retrieve the predicate based on the retention policy, passing in snapshots pertaining only to *this* policy and repository + boolean eligible = retention.getSnapshotDeletionPredicate( + allSnapshots.get(repository).stream() + .filter(info -> Optional.ofNullable(info.userMetadata()) + .map(meta -> meta.get(POLICY_ID_METADATA_FIELD)) + .map(pId -> pId.equals(policyId)) + .orElse(false)) + .collect(Collectors.toList())) + .test(snapshot); + logger.debug("[{}] testing snapshot [{}] deletion eligibility: {}", + repository, snapshot.snapshotId(), eligible ? 
"ELIGIBLE" : "INELIGIBLE"); + return eligible; + } + + void getAllSuccessfulSnapshots(Collection repositories, ActionListener>> listener, + Consumer errorHandler) { + if (repositories.isEmpty()) { + // Skip retrieving anything if there are no repositories to fetch + listener.onResponse(Collections.emptyMap()); + } + + threadPool.generic().execute(() -> { + final Map> snapshots = new ConcurrentHashMap<>(); + final CountDown countDown = new CountDown(repositories.size()); + final Runnable onComplete = () -> { + if (countDown.countDown()) { + listener.onResponse(snapshots); + } + }; + for (String repository : repositories) { + client.admin().cluster() + .prepareGetSnapshots(repository) + .execute(new ActionListener() { + @Override + public void onResponse(GetSnapshotsResponse resp) { + try { + snapshots.compute(repository, (k, previousSnaps) -> { + if (previousSnaps != null) { + throw new IllegalStateException("duplicate snapshot retrieval for repository" + repository); + } + return resp.getSnapshots().stream() + .filter(info -> info.state() == SnapshotState.SUCCESS) + .collect(Collectors.toList()); + }); + onComplete.run(); + } catch (Exception e) { + logger.error(new ParameterizedMessage("exception computing snapshots for repository {}", repository), e); + throw e; + } + } + + @Override + public void onFailure(Exception e) { + logger.warn(new ParameterizedMessage("unable to retrieve snapshots for repository [{}]", repository), e); + onComplete.run(); + } + }); + } + }); + } + + static String getPolicyId(SnapshotInfo snapshotInfo) { + return Optional.ofNullable(snapshotInfo.userMetadata()) + .filter(meta -> meta.get(SnapshotLifecyclePolicy.POLICY_ID_METADATA_FIELD) != null) + .filter(meta -> meta.get(SnapshotLifecyclePolicy.POLICY_ID_METADATA_FIELD) instanceof String) + .map(meta -> (String) meta.get(SnapshotLifecyclePolicy.POLICY_ID_METADATA_FIELD)) + .orElseThrow(() -> new IllegalStateException("expected snapshot " + snapshotInfo + + " to have a policy in its metadata, but it did not")); + } + + /** + * Maybe delete the given snapshots. If a snapshot is currently running according to the cluster + * state, this waits (using a {@link ClusterStateObserver} until a cluster state with no running + * snapshots before executing the blocking + * {@link #deleteSnapshots(Map, TimeValue, SnapshotLifecycleStats)} request. At most, we wait + * for the maximum allowed deletion time before timing out waiting for a state with no + * running snapshots. + * + * It's possible the task may still run into a SnapshotInProgressException, if a snapshot is + * started between the state retrieved here and the actual deletion. Since is is expected to be + * a rare case, no special handling is present. 
+ */ + private void maybeDeleteSnapshots(Map> snapshotsToDelete, + TimeValue maximumTime, + SnapshotLifecycleStats slmStats) { + int count = snapshotsToDelete.values().stream().mapToInt(List::size).sum(); + if (count == 0) { + logger.debug("no snapshots are eligible for deletion"); + return; + } + + ClusterState state = clusterService.state(); + if (okayToDeleteSnapshots(state)) { + deleteSnapshots(snapshotsToDelete, maximumTime, slmStats); + } else { + logger.debug("a snapshot is currently running, rescheduling SLM retention for after snapshot has completed"); + ClusterStateObserver observer = new ClusterStateObserver(clusterService, maximumTime, logger, threadPool.getThreadContext()); + CountDownLatch latch = new CountDownLatch(1); + observer.waitForNextChange( + new NoSnapshotRunningListener(observer, + newState -> threadPool.executor(ThreadPool.Names.MANAGEMENT).execute(() -> { + try { + deleteSnapshots(snapshotsToDelete, maximumTime, slmStats); + } finally { + latch.countDown(); + } + }), + e -> { + latch.countDown(); + throw new ElasticsearchException(e); + })); + try { + latch.await(); + } catch (InterruptedException e) { + throw new ElasticsearchException(e); + } + } + } + + void deleteSnapshots(Map> snapshotsToDelete, + TimeValue maximumTime, + SnapshotLifecycleStats slmStats) { + int count = snapshotsToDelete.values().stream().mapToInt(List::size).sum(); + + logger.info("starting snapshot retention deletion for [{}] snapshots", count); + long startTime = nowNanoSupplier.getAsLong(); + final AtomicInteger deleted = new AtomicInteger(0); + final AtomicInteger failed = new AtomicInteger(0); + for (Map.Entry> entry : snapshotsToDelete.entrySet()) { + String repo = entry.getKey(); + List snapshots = entry.getValue(); + for (SnapshotInfo info : snapshots) { + final String policyId = getPolicyId(info); + deleteSnapshot(policyId, repo, info.snapshotId(), slmStats, ActionListener.wrap(acknowledgedResponse -> { + deleted.incrementAndGet(); + if (acknowledgedResponse.isAcknowledged()) { + historyStore.putAsync(SnapshotHistoryItem.deletionSuccessRecord(Instant.now().toEpochMilli(), + info.snapshotId().getName(), policyId, repo)); + } else { + SnapshotHistoryItem.deletionPossibleSuccessRecord(Instant.now().toEpochMilli(), + info.snapshotId().getName(), policyId, repo, + "deletion request issued successfully, no acknowledgement received"); + } + }, e -> { + failed.incrementAndGet(); + try { + final SnapshotHistoryItem result = SnapshotHistoryItem.deletionFailureRecord(Instant.now().toEpochMilli(), + info.snapshotId().getName(), policyId, repo, e); + historyStore.putAsync(result); + } catch (IOException ex) { + // This shouldn't happen unless there's an issue with serializing the original exception + logger.error(new ParameterizedMessage( + "failed to record snapshot deletion failure for snapshot lifecycle policy [{}]", + policyId), ex); + } + })); + // Check whether we have exceeded the maximum time allowed to spend deleting + // snapshots, if we have, short-circuit the rest of the deletions + TimeValue elapsedDeletionTime = TimeValue.timeValueNanos(nowNanoSupplier.getAsLong() - startTime); + logger.debug("elapsed time for deletion of [{}] snapshot: {}", info.snapshotId(), elapsedDeletionTime); + if (elapsedDeletionTime.compareTo(maximumTime) > 0) { + logger.info("maximum snapshot retention deletion time reached, time spent: [{}]," + + " maximum allowed time: [{}], deleted [{}] out of [{}] snapshots scheduled for deletion, failed to delete [{}]", + elapsedDeletionTime, maximumTime, deleted, 
count, failed); + slmStats.deletionTime(elapsedDeletionTime); + slmStats.retentionTimedOut(); + return; + } + } + } + TimeValue totalElapsedTime = TimeValue.timeValueNanos(nowNanoSupplier.getAsLong() - startTime); + logger.debug("total elapsed time for deletion of [{}] snapshots: {}", deleted, totalElapsedTime); + slmStats.deletionTime(totalElapsedTime); + } + + /** + * Delete the given snapshot from the repository in blocking manner + * + * @param repo The repository the snapshot is in + * @param snapshot The snapshot metadata + * @param listener {@link ActionListener#onResponse(Object)} is called if a {@link SnapshotHistoryItem} can be created representing a + * successful or failed deletion call. {@link ActionListener#onFailure(Exception)} is called only if interrupted. + */ + void deleteSnapshot(String slmPolicy, String repo, SnapshotId snapshot, SnapshotLifecycleStats slmStats, + ActionListener listener) { + logger.info("[{}] snapshot retention deleting snapshot [{}]", repo, snapshot); + CountDownLatch latch = new CountDownLatch(1); + client.admin().cluster().prepareDeleteSnapshot(repo, snapshot.getName()) + .execute(new LatchedActionListener<>(new ActionListener() { + @Override + public void onResponse(AcknowledgedResponse acknowledgedResponse) { + if (acknowledgedResponse.isAcknowledged()) { + logger.debug("[{}] snapshot [{}] deleted successfully", repo, snapshot); + } else { + logger.warn("[{}] snapshot [{}] delete issued but the request was not acknowledged", repo, snapshot); + } + listener.onResponse(acknowledgedResponse); + slmStats.snapshotDeleted(slmPolicy); + } + + @Override + public void onFailure(Exception e) { + logger.warn(new ParameterizedMessage("[{}] failed to delete snapshot [{}] for retention", + repo, snapshot), e); + slmStats.snapshotDeleteFailure(slmPolicy); + listener.onFailure(e); + } + }, latch)); + try { + // Deletes cannot occur simultaneously, so wait for this + // deletion to complete before attempting the next one + latch.await(); + } catch (InterruptedException e) { + logger.error(new ParameterizedMessage("[{}] deletion of snapshot [{}] interrupted", + repo, snapshot), e); + listener.onFailure(e); + slmStats.snapshotDeleteFailure(slmPolicy); + } + } + + void updateStateWithStats(SnapshotLifecycleStats newStats) { + clusterService.submitStateUpdateTask("update_slm_stats", new UpdateSnapshotLifecycleStatsTask(newStats)); + } + + public static boolean okayToDeleteSnapshots(ClusterState state) { + // Cannot delete during a snapshot + final SnapshotsInProgress snapshotsInProgress = state.custom(SnapshotsInProgress.TYPE); + if (snapshotsInProgress != null && snapshotsInProgress.entries().size() > 0) { + return false; + } + + // Cannot delete during an existing delete + final SnapshotDeletionsInProgress deletionsInProgress = state.custom(SnapshotDeletionsInProgress.TYPE); + if (deletionsInProgress != null && deletionsInProgress.hasDeletionsInProgress()) { + return false; + } + + // Cannot delete while a repository is being cleaned + final RepositoryCleanupInProgress repositoryCleanupInProgress = state.custom(RepositoryCleanupInProgress.TYPE); + if (repositoryCleanupInProgress != null && repositoryCleanupInProgress.cleanupInProgress() == false) { + return false; + } + + // Cannot delete during a restore + final RestoreInProgress restoreInProgress = state.custom(RestoreInProgress.TYPE); + if (restoreInProgress != null) { + return false; + } + + // It's okay to delete snapshots + return true; + } + + /** + * A {@link ClusterStateObserver.Listener} that invokes the 
given function with the new state, + * once no snapshots are running. If a snapshot is still running it registers a new listener + * and tries again. Passes any exceptions to the original exception listener if they occur. + */ + class NoSnapshotRunningListener implements ClusterStateObserver.Listener { + + private final Consumer<ClusterState> reRun; + private final Consumer<Exception> exceptionConsumer; + private final ClusterStateObserver observer; + + NoSnapshotRunningListener(ClusterStateObserver observer, + Consumer<ClusterState> reRun, + Consumer<Exception> exceptionConsumer) { + this.observer = observer; + this.reRun = reRun; + this.exceptionConsumer = exceptionConsumer; + } + + @Override + public void onNewClusterState(ClusterState state) { + try { + if (okayToDeleteSnapshots(state)) { + logger.debug("retrying SLM snapshot retention deletion after snapshot operation has completed"); + reRun.accept(state); + } else { + observer.waitForNextChange(this); + } + } catch (Exception e) { + exceptionConsumer.accept(e); + } + } + + @Override + public void onClusterServiceClose() { + // This means the cluster is being shut down, so nothing to do here + } + + @Override + public void onTimeout(TimeValue timeout) { + exceptionConsumer.accept( + new IllegalStateException("slm retention snapshot deletion timed out while waiting for ongoing snapshot operations to complete")); + } + } +} diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/UpdateSnapshotLifecycleStatsTask.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/UpdateSnapshotLifecycleStatsTask.java new file mode 100644 index 00000000000..7d3946b57ce --- /dev/null +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/UpdateSnapshotLifecycleStatsTask.java @@ -0,0 +1,56 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.slm; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.xpack.core.slm.SnapshotLifecycleMetadata; + +/** + * {@link UpdateSnapshotLifecycleStatsTask} is a cluster state update task that retrieves the + * current SLM stats, merges them with the newly produced stats (non-mutating), and then updates + * the cluster state with the new stats numbers + */ +public class UpdateSnapshotLifecycleStatsTask extends ClusterStateUpdateTask { + private static final Logger logger = LogManager.getLogger(SnapshotRetentionTask.class); + + private final SnapshotLifecycleStats runStats; + + UpdateSnapshotLifecycleStatsTask(SnapshotLifecycleStats runStats) { + this.runStats = runStats; + } + + @Override + public ClusterState execute(ClusterState currentState) { + final MetaData currentMeta = currentState.metaData(); + final SnapshotLifecycleMetadata currentSlmMeta = currentMeta.custom(SnapshotLifecycleMetadata.TYPE); + + if (currentSlmMeta == null) { + return currentState; + } + + SnapshotLifecycleStats newMetrics = currentSlmMeta.getStats().merge(runStats); + SnapshotLifecycleMetadata newSlmMeta = new SnapshotLifecycleMetadata(currentSlmMeta.getSnapshotConfigurations(), + currentSlmMeta.getOperationMode(), newMetrics); + + return ClusterState.builder(currentState) + .metaData(MetaData.builder(currentMeta) + .putCustom(SnapshotLifecycleMetadata.TYPE, newSlmMeta)) + .build(); + } + + @Override + public void onFailure(String source, Exception e) { + logger.error(new ParameterizedMessage("failed to update cluster state with snapshot lifecycle stats, " + + "source: [{}], missing stats: [{}]", source, runStats), + e); + } +} diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/RestGetSnapshotLifecycleStatsAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/RestGetSnapshotLifecycleStatsAction.java new file mode 100644 index 00000000000..b8629c2db57 --- /dev/null +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/RestGetSnapshotLifecycleStatsAction.java @@ -0,0 +1,35 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
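The stats update task above reads the current SLM stats, merges in the metrics from the latest run without mutating either side (SnapshotLifecycleStats#merge), and writes the result back to the cluster state. A toy, non-Elasticsearch version of that merge step, with made-up counter keys:

import java.util.HashMap;
import java.util.Map;

// Toy stand-in for the stats merge: combine two counter maps into a new map, leaving
// both inputs untouched; the cluster state update then stores the merged result.
class StatsMergeSketch {
    static Map<String, Long> merge(Map<String, Long> current, Map<String, Long> latestRun) {
        Map<String, Long> merged = new HashMap<>(current);
        latestRun.forEach((key, value) -> merged.merge(key, value, Long::sum));
        return merged;
    }

    public static void main(String[] args) {
        System.out.println(merge(Map.of("snapshots_taken", 3L),
                                 Map.of("snapshots_taken", 1L, "retention_runs", 1L)));
    }
}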
+ */ + +package org.elasticsearch.xpack.slm.action; + +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.core.slm.action.GetSnapshotLifecycleStatsAction; + +public class RestGetSnapshotLifecycleStatsAction extends BaseRestHandler { + + public RestGetSnapshotLifecycleStatsAction(RestController controller) { + controller.registerHandler(RestRequest.Method.GET, "/_slm/stats", this); + } + + @Override + public String getName() { + return "slm_get_lifecycle_stats"; + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) { + GetSnapshotLifecycleStatsAction.Request req = new GetSnapshotLifecycleStatsAction.Request(); + req.timeout(request.paramAsTime("timeout", req.timeout())); + req.masterNodeTimeout(request.paramAsTime("master_timeout", req.masterNodeTimeout())); + + return channel -> client.execute(GetSnapshotLifecycleStatsAction.INSTANCE, req, new RestToXContentListener<>(channel)); + } +} diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/TransportDeleteSnapshotLifecycleAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/TransportDeleteSnapshotLifecycleAction.java index f17020d6272..7784be3d61e 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/TransportDeleteSnapshotLifecycleAction.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/TransportDeleteSnapshotLifecycleAction.java @@ -81,7 +81,8 @@ public class TransportDeleteSnapshotLifecycleAction extends return ClusterState.builder(currentState) .metaData(MetaData.builder(metaData) .putCustom(SnapshotLifecycleMetadata.TYPE, - new SnapshotLifecycleMetadata(newConfigs, snapMeta.getOperationMode()))) + new SnapshotLifecycleMetadata(newConfigs, + snapMeta.getOperationMode(), snapMeta.getStats().removePolicy(request.getLifecycleId())))) .build(); } }); diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/TransportGetSnapshotLifecycleAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/TransportGetSnapshotLifecycleAction.java index 98331b9f639..d45e97eb5ab 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/TransportGetSnapshotLifecycleAction.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/TransportGetSnapshotLifecycleAction.java @@ -25,6 +25,7 @@ import org.elasticsearch.xpack.core.slm.SnapshotLifecycleMetadata; import org.elasticsearch.xpack.core.slm.SnapshotLifecyclePolicy; import org.elasticsearch.xpack.core.slm.SnapshotLifecyclePolicyItem; import org.elasticsearch.xpack.core.slm.action.GetSnapshotLifecycleAction; +import org.elasticsearch.xpack.slm.SnapshotLifecycleStats; import java.io.IOException; import java.util.Arrays; @@ -85,6 +86,7 @@ public class TransportGetSnapshotLifecycleAction extends } final Set ids = new HashSet<>(Arrays.asList(request.getLifecycleIds())); + final SnapshotLifecycleStats slmStats = snapMeta.getStats(); List lifecycles = snapMeta.getSnapshotConfigurations().values().stream() .filter(meta -> { if (ids.isEmpty()) { @@ -93,7 +95,9 @@ public class TransportGetSnapshotLifecycleAction extends return ids.contains(meta.getPolicy().getId()); } }) - .map(policyMeta -> new SnapshotLifecyclePolicyItem(policyMeta, 
inProgress.get(policyMeta.getPolicy().getId()))) + .map(policyMeta -> + new SnapshotLifecyclePolicyItem(policyMeta, inProgress.get(policyMeta.getPolicy().getId()), + slmStats.getMetrics().get(policyMeta.getPolicy().getId()))) .collect(Collectors.toList()); listener.onResponse(new GetSnapshotLifecycleAction.Response(lifecycles)); } diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/TransportGetSnapshotLifecycleStatsAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/TransportGetSnapshotLifecycleStatsAction.java new file mode 100644 index 00000000000..7f017f09969 --- /dev/null +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/TransportGetSnapshotLifecycleStatsAction.java @@ -0,0 +1,63 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.slm.action; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.master.TransportMasterNodeAction; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.slm.SnapshotLifecycleMetadata; +import org.elasticsearch.xpack.core.slm.action.GetSnapshotLifecycleStatsAction; +import org.elasticsearch.xpack.slm.SnapshotLifecycleStats; + +import java.io.IOException; + +public class TransportGetSnapshotLifecycleStatsAction extends + TransportMasterNodeAction { + + @Inject + public TransportGetSnapshotLifecycleStatsAction(TransportService transportService, ClusterService clusterService, + ThreadPool threadPool, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver) { + super(GetSnapshotLifecycleStatsAction.NAME, transportService, clusterService, threadPool, actionFilters, + GetSnapshotLifecycleStatsAction.Request::new, indexNameExpressionResolver); + } + + @Override + protected String executor() { + return ThreadPool.Names.SAME; + } + + @Override + protected GetSnapshotLifecycleStatsAction.Response read(StreamInput in) throws IOException { + return new GetSnapshotLifecycleStatsAction.Response(in); + } + + @Override + protected void masterOperation(GetSnapshotLifecycleStatsAction.Request request, + ClusterState state, ActionListener listener) { + SnapshotLifecycleMetadata slmMeta = state.metaData().custom(SnapshotLifecycleMetadata.TYPE); + if (slmMeta == null) { + listener.onResponse(new GetSnapshotLifecycleStatsAction.Response(new SnapshotLifecycleStats())); + } else { + listener.onResponse(new GetSnapshotLifecycleStatsAction.Response(slmMeta.getStats())); + } + } + + @Override + protected ClusterBlockException checkBlock(GetSnapshotLifecycleStatsAction.Request request, ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ); + } +} diff --git 
a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/TransportPutSnapshotLifecycleAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/TransportPutSnapshotLifecycleAction.java index 82034784a76..aa54d1de8a2 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/TransportPutSnapshotLifecycleAction.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/action/TransportPutSnapshotLifecycleAction.java @@ -30,6 +30,7 @@ import org.elasticsearch.xpack.core.slm.SnapshotLifecycleMetadata; import org.elasticsearch.xpack.core.slm.SnapshotLifecyclePolicyMetadata; import org.elasticsearch.xpack.core.slm.action.PutSnapshotLifecycleAction; import org.elasticsearch.xpack.slm.SnapshotLifecycleService; +import org.elasticsearch.xpack.slm.SnapshotLifecycleStats; import java.io.IOException; import java.time.Instant; @@ -92,7 +93,8 @@ public class TransportPutSnapshotLifecycleAction extends OperationMode mode = Optional.ofNullable(ilmMeta) .map(IndexLifecycleMetadata::getOperationMode) .orElse(OperationMode.RUNNING); - lifecycleMetadata = new SnapshotLifecycleMetadata(Collections.singletonMap(id, meta), mode); + lifecycleMetadata = new SnapshotLifecycleMetadata(Collections.singletonMap(id, meta), + mode, new SnapshotLifecycleStats()); logger.info("adding new snapshot lifecycle [{}]", id); } else { Map snapLifecycles = new HashMap<>(snapMeta.getSnapshotConfigurations()); @@ -104,7 +106,8 @@ public class TransportPutSnapshotLifecycleAction extends .setModifiedDate(Instant.now().toEpochMilli()) .build(); snapLifecycles.put(id, newLifecycle); - lifecycleMetadata = new SnapshotLifecycleMetadata(snapLifecycles, snapMeta.getOperationMode()); + lifecycleMetadata = new SnapshotLifecycleMetadata(snapLifecycles, + snapMeta.getOperationMode(), snapMeta.getStats()); if (oldLifecycle == null) { logger.info("adding new snapshot lifecycle [{}]", id); } else { diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleInitialisationTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleInitialisationTests.java index b18141d25df..d568f643f27 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleInitialisationTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleInitialisationTests.java @@ -59,6 +59,7 @@ import java.util.Collections; import java.util.List; import java.util.Map; import java.util.concurrent.ExecutionException; +import java.util.concurrent.atomic.AtomicLong; import java.util.stream.Collectors; import static org.elasticsearch.client.Requests.clusterHealthRequest; @@ -204,7 +205,7 @@ public class IndexLifecycleInitialisationTests extends ESIntegTestCase { public void testExplainExecution() throws Exception { // start node logger.info("Starting server1"); - final String server_1 = internalCluster().startNode(); + internalCluster().startNode(); logger.info("Creating lifecycle [test_lifecycle]"); PutLifecycleAction.Request putLifecycleRequest = new PutLifecycleAction.Request(lifecyclePolicy); PutLifecycleAction.Response putLifecycleResponse = client().execute(PutLifecycleAction.INSTANCE, putLifecycleRequest).get(); @@ -223,15 +224,39 @@ public class IndexLifecycleInitialisationTests extends ESIntegTestCase { .actionGet(); assertAcked(createIndexResponse); + // using AtomicLong only to extract a value from a lambda rather than the more traditional atomic update use-case + AtomicLong originalLifecycleDate = 
new AtomicLong(); { PhaseExecutionInfo expectedExecutionInfo = new PhaseExecutionInfo(lifecyclePolicy.getName(), mockPhase, 1L, actualModifiedDate); assertBusy(() -> { - ExplainLifecycleRequest explainRequest = new ExplainLifecycleRequest(); - ExplainLifecycleResponse explainResponse = client().execute(ExplainLifecycleAction.INSTANCE, explainRequest).get(); - assertThat(explainResponse.getIndexResponses().size(), equalTo(1)); - IndexLifecycleExplainResponse indexResponse = explainResponse.getIndexResponses().get("test"); + IndexLifecycleExplainResponse indexResponse = executeExplainRequestAndGetTestIndexResponse(); assertThat(indexResponse.getStep(), equalTo("observable_cluster_state_action")); assertThat(indexResponse.getPhaseExecutionInfo(), equalTo(expectedExecutionInfo)); + originalLifecycleDate.set(indexResponse.getLifecycleDate()); + }); + } + + // set the origination date setting to an older value + client().admin().indices().prepareUpdateSettings("test") + .setSettings(Collections.singletonMap(LifecycleSettings.LIFECYCLE_ORIGINATION_DATE, 1000L)).get(); + + { + assertBusy(() -> { + IndexLifecycleExplainResponse indexResponse = executeExplainRequestAndGetTestIndexResponse(); + assertThat("The configured origination date dictates the lifecycle date", + indexResponse.getLifecycleDate(), equalTo(1000L)); + }); + } + + // set the origination date setting to null + client().admin().indices().prepareUpdateSettings("test") + .setSettings(Collections.singletonMap(LifecycleSettings.LIFECYCLE_ORIGINATION_DATE, null)).get(); + + { + assertBusy(() -> { + IndexLifecycleExplainResponse indexResponse = executeExplainRequestAndGetTestIndexResponse(); + assertThat("Without the origination date, the index create date should dictate the lifecycle date", + indexResponse.getLifecycleDate(), equalTo(originalLifecycleDate.get())); }); } @@ -242,10 +267,7 @@ public class IndexLifecycleInitialisationTests extends ESIntegTestCase { { PhaseExecutionInfo expectedExecutionInfo = new PhaseExecutionInfo(lifecyclePolicy.getName(), null, 1L, actualModifiedDate); assertBusy(() -> { - ExplainLifecycleRequest explainRequest = new ExplainLifecycleRequest(); - ExplainLifecycleResponse explainResponse = client().execute(ExplainLifecycleAction.INSTANCE, explainRequest).get(); - assertThat(explainResponse.getIndexResponses().size(), equalTo(1)); - IndexLifecycleExplainResponse indexResponse = explainResponse.getIndexResponses().get("test"); + IndexLifecycleExplainResponse indexResponse = executeExplainRequestAndGetTestIndexResponse(); assertThat(indexResponse.getPhase(), equalTo(TerminalPolicyStep.COMPLETED_PHASE)); assertThat(indexResponse.getStep(), equalTo(TerminalPolicyStep.KEY.getName())); assertThat(indexResponse.getPhaseExecutionInfo(), equalTo(expectedExecutionInfo)); @@ -253,6 +275,13 @@ public class IndexLifecycleInitialisationTests extends ESIntegTestCase { } } + private IndexLifecycleExplainResponse executeExplainRequestAndGetTestIndexResponse() throws ExecutionException, InterruptedException { + ExplainLifecycleRequest explainRequest = new ExplainLifecycleRequest(); + ExplainLifecycleResponse explainResponse = client().execute(ExplainLifecycleAction.INSTANCE, explainRequest).get(); + assertThat(explainResponse.getIndexResponses().size(), equalTo(1)); + return explainResponse.getIndexResponses().get("test"); + } + public void testMasterDedicatedDataDedicated() throws Exception { settings = Settings.builder().put(settings).put("index.lifecycle.test.complete", true).build(); // start master node diff --git 
a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleRunnerTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleRunnerTests.java index 168e166faba..506070780b6 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleRunnerTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleRunnerTests.java @@ -1213,6 +1213,18 @@ public class IndexLifecycleRunnerTests extends ESTestCase { now.set(Long.MAX_VALUE); assertTrue("index should be able to transition past phase's age", runner.isReadyToTransitionToThisPhase(policyName, indexMetaData, "phase")); + + // Come back to the "present" + now.set(5L); + indexMetaData = IndexMetaData.builder(indexMetaData) + .settings(Settings.builder() + .put(indexMetaData.getSettings()) + .put(LifecycleSettings.LIFECYCLE_ORIGINATION_DATE, 3L) + .build()) + .putCustom(ILM_CUSTOM_METADATA_KEY, lifecycleState.build().asMap()) + .build(); + assertTrue("index should be able to transition due to the origination date indicating it's old enough", + runner.isReadyToTransitionToThisPhase(policyName, indexMetaData, "phase")); } diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/OperationModeUpdateTaskTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/OperationModeUpdateTaskTests.java index bcd268b8b3e..f3ed5924cfe 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/OperationModeUpdateTaskTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/OperationModeUpdateTaskTests.java @@ -14,6 +14,7 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.ilm.IndexLifecycleMetadata; import org.elasticsearch.xpack.core.ilm.OperationMode; import org.elasticsearch.xpack.core.slm.SnapshotLifecycleMetadata; +import org.elasticsearch.xpack.slm.SnapshotLifecycleStats; import java.util.Collections; @@ -58,7 +59,8 @@ public class OperationModeUpdateTaskTests extends ESTestCase { private OperationMode executeUpdate(boolean metadataInstalled, OperationMode currentMode, OperationMode requestMode, boolean assertSameClusterState) { IndexLifecycleMetadata indexLifecycleMetadata = new IndexLifecycleMetadata(Collections.emptyMap(), currentMode); - SnapshotLifecycleMetadata snapshotLifecycleMetadata = new SnapshotLifecycleMetadata(Collections.emptyMap(), currentMode); + SnapshotLifecycleMetadata snapshotLifecycleMetadata = + new SnapshotLifecycleMetadata(Collections.emptyMap(), currentMode, new SnapshotLifecycleStats()); ImmutableOpenMap.Builder customsMapBuilder = ImmutableOpenMap.builder(); MetaData.Builder metaData = MetaData.builder() .persistentSettings(settings(Version.CURRENT).build()); diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/slm/SLMSnapshotBlockingIntegTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/slm/SLMSnapshotBlockingIntegTests.java new file mode 100644 index 00000000000..b42a1f98074 --- /dev/null +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/slm/SLMSnapshotBlockingIntegTests.java @@ -0,0 +1,325 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.slm; + +import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotStatus; +import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotsStatusResponse; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.SnapshotsInProgress; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.repositories.RepositoriesService; +import org.elasticsearch.snapshots.SnapshotMissingException; +import org.elasticsearch.snapshots.mockstore.MockRepository; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.junit.annotations.TestLogging; +import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.ilm.LifecycleSettings; +import org.elasticsearch.xpack.core.slm.SnapshotLifecyclePolicy; +import org.elasticsearch.xpack.core.slm.SnapshotLifecyclePolicyItem; +import org.elasticsearch.xpack.core.slm.SnapshotRetentionConfiguration; +import org.elasticsearch.xpack.core.slm.action.ExecuteSnapshotLifecycleAction; +import org.elasticsearch.xpack.core.slm.action.GetSnapshotLifecycleAction; +import org.elasticsearch.xpack.core.slm.action.PutSnapshotLifecycleAction; +import org.elasticsearch.xpack.ilm.IndexLifecycle; +import org.junit.After; + +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +import static org.hamcrest.Matchers.anyOf; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; + +/** + * Tests for Snapshot Lifecycle Management that require a slow or blocked snapshot repo (using {@link MockRepository}) + */ +@TestLogging(value = "org.elasticsearch.snapshots.mockstore:DEBUG", reason = "d") +public class SLMSnapshotBlockingIntegTests extends ESIntegTestCase { + + @After + public void resetSLMSettings() { + // unset retention settings + client().admin().cluster().prepareUpdateSettings() + .setTransientSettings(Settings.builder() + .put(LifecycleSettings.SLM_RETENTION_SCHEDULE, (String) null) + .build()) + .get(); + } + + @Override + protected Collection<Class<? extends Plugin>> nodePlugins() { + return Arrays.asList(MockRepository.Plugin.class, LocalStateCompositeXPackPlugin.class, IndexLifecycle.class); + } + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + Settings.Builder settings = Settings.builder().put(super.nodeSettings(nodeOrdinal)); + settings.put(XPackSettings.INDEX_LIFECYCLE_ENABLED.getKey(), true); + settings.put(XPackSettings.MACHINE_LEARNING_ENABLED.getKey(), false); + settings.put(XPackSettings.SECURITY_ENABLED.getKey(), false); + settings.put(XPackSettings.WATCHER_ENABLED.getKey(), false); + settings.put(XPackSettings.MONITORING_ENABLED.getKey(), false); + settings.put(XPackSettings.GRAPH_ENABLED.getKey(), false); + settings.put(XPackSettings.LOGSTASH_ENABLED.getKey(), false); + return settings.build(); + } + + @Override + protected Collection<Class<? extends Plugin>> transportClientPlugins() { + return Arrays.asList(LocalStateCompositeXPackPlugin.class, IndexLifecycle.class); + } + + @Override + protected Settings transportClientSettings() { + Settings.Builder settings = Settings.builder().put(super.transportClientSettings()); + settings.put(XPackSettings.INDEX_LIFECYCLE_ENABLED.getKey(), true); + settings.put(XPackSettings.MACHINE_LEARNING_ENABLED.getKey(), false); +
settings.put(XPackSettings.SECURITY_ENABLED.getKey(), false); + settings.put(XPackSettings.WATCHER_ENABLED.getKey(), false); + settings.put(XPackSettings.MONITORING_ENABLED.getKey(), false); + settings.put(XPackSettings.GRAPH_ENABLED.getKey(), false); + settings.put(XPackSettings.LOGSTASH_ENABLED.getKey(), false); + return settings.build(); + } + + + public void testSnapshotInProgress() throws Exception { + final String indexName = "test"; + final String policyName = "test-policy"; + final String repoId = "my-repo"; + int docCount = 20; + for (int i = 0; i < docCount; i++) { + index(indexName, "_doc", i + "", Collections.singletonMap("foo", "bar")); + } + + // Create a snapshot repo + initializeRepo(repoId); + + logger.info("--> creating policy {}", policyName); + createSnapshotPolicy(policyName, "snap", "1 2 3 4 5 ?", repoId, indexName, true); + + logger.info("--> blocking master from completing snapshot"); + blockAllDataNodes(repoId); + blockMasterFromFinalizingSnapshotOnIndexFile(repoId); + + logger.info("--> executing snapshot lifecycle"); + final String snapshotName = executePolicy(policyName); + + // Check that the executed snapshot shows up in the SLM output + assertBusy(() -> { + GetSnapshotLifecycleAction.Response getResp = + client().execute(GetSnapshotLifecycleAction.INSTANCE, new GetSnapshotLifecycleAction.Request(policyName)).get(); + logger.info("--> checking for in progress snapshot..."); + + assertThat(getResp.getPolicies().size(), greaterThan(0)); + SnapshotLifecyclePolicyItem item = getResp.getPolicies().get(0); + assertNotNull(item.getSnapshotInProgress()); + SnapshotLifecyclePolicyItem.SnapshotInProgress inProgress = item.getSnapshotInProgress(); + assertThat(inProgress.getSnapshotId().getName(), equalTo(snapshotName)); + assertThat(inProgress.getStartTime(), greaterThan(0L)); + assertThat(inProgress.getState(), anyOf(equalTo(SnapshotsInProgress.State.INIT), equalTo(SnapshotsInProgress.State.STARTED))); + assertNull(inProgress.getFailure()); + }); + + logger.info("--> unblocking snapshots"); + unblockAllDataNodes(repoId); + unblockRepo(repoId); + + // Cancel/delete the snapshot + try { + client().admin().cluster().prepareDeleteSnapshot(repoId, snapshotName).get(); + } catch (SnapshotMissingException e) { + // ignore + } + } + + public void testRetentionWhileSnapshotInProgress() throws Exception { + final String indexName = "test"; + final String policyId = "slm-policy"; + final String repoId = "slm-repo"; + int docCount = 20; + for (int i = 0; i < docCount; i++) { + index(indexName, "_doc", i + "", Collections.singletonMap("foo", "bar")); + } + + initializeRepo(repoId); + + logger.info("--> creating policy {}", policyId); + createSnapshotPolicy(policyId, "snap", "1 2 3 4 5 ?", repoId, indexName, true, + new SnapshotRetentionConfiguration(TimeValue.timeValueSeconds(0), null, null)); + + // Create a snapshot and wait for it to be complete (need something that can be deleted) + final String completedSnapshotName = executePolicy(policyId); + logger.info("--> kicked off snapshot {}", completedSnapshotName); + assertBusy(() -> { + try { + SnapshotsStatusResponse s = + client().admin().cluster().prepareSnapshotStatus(repoId).setSnapshots(completedSnapshotName).get(); + assertThat("expected a snapshot but none were returned", s.getSnapshots().size(), equalTo(1)); + SnapshotStatus status = s.getSnapshots().get(0); + logger.info("--> waiting for snapshot {} to be completed, got: {}", completedSnapshotName, status.getState()); + assertThat(status.getState(), 
equalTo(SnapshotsInProgress.State.SUCCESS)); + } catch (SnapshotMissingException e) { + logger.error("expected a snapshot but it was missing", e); + fail("expected a snapshot with name " + completedSnapshotName + " but it does not exist"); + } + }); + + // Wait for all running snapshots to be cleared from cluster state + assertBusy(() -> { + logger.info("--> waiting for cluster state to be clear of snapshots"); + ClusterState state = client().admin().cluster().prepareState().setCustoms(true).get().getState(); + assertTrue("cluster state was not ready for deletion " + state, SnapshotRetentionTask.okayToDeleteSnapshots(state)); + }); + + // Take another snapshot, but before doing that, block it from completing + logger.info("--> blocking nodes from completing snapshot"); + blockAllDataNodes(repoId); + final String secondSnapName = executePolicy(policyId); + + // Check that the executed snapshot shows up in the SLM output as in_progress + assertBusy(() -> { + GetSnapshotLifecycleAction.Response getResp = + client().execute(GetSnapshotLifecycleAction.INSTANCE, new GetSnapshotLifecycleAction.Request(policyId)).get(); + logger.info("--> checking for in progress snapshot..."); + + assertThat(getResp.getPolicies().size(), greaterThan(0)); + SnapshotLifecyclePolicyItem item = getResp.getPolicies().get(0); + assertNotNull(item.getSnapshotInProgress()); + SnapshotLifecyclePolicyItem.SnapshotInProgress inProgress = item.getSnapshotInProgress(); + assertThat(inProgress.getSnapshotId().getName(), equalTo(secondSnapName)); + assertThat(inProgress.getStartTime(), greaterThan(0L)); + assertThat(inProgress.getState(), anyOf(equalTo(SnapshotsInProgress.State.INIT), equalTo(SnapshotsInProgress.State.STARTED))); + assertNull(inProgress.getFailure()); + }); + + // Run retention every second + client().admin().cluster().prepareUpdateSettings() + .setTransientSettings(Settings.builder() + .put(LifecycleSettings.SLM_RETENTION_SCHEDULE, "*/1 * * * * ?") + .build()) + .get(); + // Guarantee that retention gets a chance to run before unblocking, I know sleeps are not + // ideal, but we don't currently have a way to force retention to run, so waiting at least + // a second is the best we can do for now. + Thread.sleep(1500); + + logger.info("--> unblocking snapshots"); + unblockRepo(repoId); + unblockAllDataNodes(repoId); + + // Check that the snapshot created by the policy has been removed by retention + assertBusy(() -> { + // Trigger a cluster state update so that it re-checks for a snapshot in progress + client().admin().cluster().prepareReroute().get(); + logger.info("--> waiting for snapshot to be deleted"); + try { + SnapshotsStatusResponse s = + client().admin().cluster().prepareSnapshotStatus(repoId).setSnapshots(completedSnapshotName).get(); + assertNull("expected no snapshot but one was returned", s.getSnapshots().get(0)); + } catch (SnapshotMissingException e) { + // Great, we wanted it to be deleted! 
+ } + }); + + // Cancel/delete the snapshot + try { + client().admin().cluster().prepareDeleteSnapshot(repoId, secondSnapName).get(); + } catch (SnapshotMissingException e) { + // ignore + } + } + + private void initializeRepo(String repoName) { + client().admin().cluster().preparePutRepository(repoName) + .setType("mock") + .setSettings(Settings.builder() + .put("compress", randomBoolean()) + .put("location", randomAlphaOfLength(6)) + .build()) + .get(); + } + + private void createSnapshotPolicy(String policyName, String snapshotNamePattern, String schedule, String repoId, + String indexPattern, boolean ignoreUnavailable) { + createSnapshotPolicy(policyName, snapshotNamePattern, schedule, repoId, indexPattern, + ignoreUnavailable, SnapshotRetentionConfiguration.EMPTY); + } + + private void createSnapshotPolicy(String policyName, String snapshotNamePattern, String schedule, String repoId, + String indexPattern, boolean ignoreUnavailable, + SnapshotRetentionConfiguration retention) { + Map snapConfig = new HashMap<>(); + snapConfig.put("indices", Collections.singletonList(indexPattern)); + snapConfig.put("ignore_unavailable", ignoreUnavailable); + if (randomBoolean()) { + Map metadata = new HashMap<>(); + int fieldCount = randomIntBetween(2,5); + for (int i = 0; i < fieldCount; i++) { + metadata.put(randomValueOtherThanMany(key -> "policy".equals(key) || metadata.containsKey(key), + () -> randomAlphaOfLength(5)), randomAlphaOfLength(4)); + } + } + SnapshotLifecyclePolicy policy = new SnapshotLifecyclePolicy(policyName, snapshotNamePattern, schedule, + repoId, snapConfig, retention); + + PutSnapshotLifecycleAction.Request putLifecycle = new PutSnapshotLifecycleAction.Request(policyName, policy); + try { + client().execute(PutSnapshotLifecycleAction.INSTANCE, putLifecycle).get(); + } catch (Exception e) { + logger.error("failed to create slm policy", e); + fail("failed to create policy " + policy + " got: " + e); + } + } + + /** + * Execute the given policy and return the generated snapshot name + */ + private String executePolicy(String policyId) { + ExecuteSnapshotLifecycleAction.Request executeReq = new ExecuteSnapshotLifecycleAction.Request(policyId); + ExecuteSnapshotLifecycleAction.Response resp = null; + try { + resp = client().execute(ExecuteSnapshotLifecycleAction.INSTANCE, executeReq).get(); + return resp.getSnapshotName(); + } catch (Exception e) { + logger.error("failed to execute policy", e); + fail("failed to execute policy " + policyId + " got: " + e); + return "bad"; + } + } + + public static void blockMasterFromFinalizingSnapshotOnIndexFile(final String repositoryName) { + for(RepositoriesService repositoriesService : internalCluster().getDataNodeInstances(RepositoriesService.class)) { + ((MockRepository)repositoriesService.repository(repositoryName)).setBlockOnWriteIndexFile(true); + } + } + + public static String unblockRepo(final String repositoryName) { + final String masterName = internalCluster().getMasterName(); + ((MockRepository)internalCluster().getInstance(RepositoriesService.class, masterName) + .repository(repositoryName)).unblock(); + return masterName; + } + + public static void blockAllDataNodes(String repository) { + for(RepositoriesService repositoriesService : internalCluster().getDataNodeInstances(RepositoriesService.class)) { + ((MockRepository)repositoriesService.repository(repository)).blockOnDataFiles(true); + } + } + + public static void unblockAllDataNodes(String repository) { + for(RepositoriesService repositoriesService : 
internalCluster().getDataNodeInstances(RepositoriesService.class)) { + ((MockRepository)repositoriesService.repository(repository)).unblock(); + } + } +} diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecyclePolicyTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecyclePolicyTests.java index 9370cad7f87..6523d7a0722 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecyclePolicyTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecyclePolicyTests.java @@ -11,12 +11,15 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractSerializingTestCase; import org.elasticsearch.xpack.core.slm.SnapshotLifecyclePolicy; +import org.elasticsearch.xpack.core.slm.SnapshotLifecyclePolicyMetadataTests; +import org.elasticsearch.xpack.core.slm.SnapshotRetentionConfiguration; import java.io.IOException; import java.util.Collections; import java.util.HashMap; import java.util.Map; +import static org.elasticsearch.xpack.core.slm.SnapshotLifecyclePolicyMetadataTests.randomSnapshotLifecyclePolicy; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; @@ -30,29 +33,34 @@ public class SnapshotLifecyclePolicyTests extends AbstractSerializingTestCase", "1 * * * * ?", "repo", Collections.emptyMap()); + p = new SnapshotLifecyclePolicy("id", "", "1 * * * * ?", "repo", Collections.emptyMap(), + SnapshotRetentionConfiguration.EMPTY); assertThat(p.generateSnapshotName(context), startsWith("name-2019.03.15-")); assertThat(p.generateSnapshotName(context).length(), greaterThan("name-2019.03.15-".length())); - p = new SnapshotLifecyclePolicy("id", "", "1 * * * * ?", "repo", Collections.emptyMap()); + p = new SnapshotLifecyclePolicy("id", "", "1 * * * * ?", "repo", Collections.emptyMap(), + SnapshotRetentionConfiguration.EMPTY); assertThat(p.generateSnapshotName(context), startsWith("name-2019.03.01-")); - p = new SnapshotLifecyclePolicy("id", "", "1 * * * * ?", "repo", Collections.emptyMap()); + p = new SnapshotLifecyclePolicy("id", "", "1 * * * * ?", "repo", Collections.emptyMap(), + SnapshotRetentionConfiguration.EMPTY); assertThat(p.generateSnapshotName(context), startsWith("name-2019-03-15.21:09:00-")); } public void testNextExecutionTime() { - SnapshotLifecyclePolicy p = new SnapshotLifecyclePolicy("id", "name", "0 1 2 3 4 ? 2099", "repo", Collections.emptyMap()); + SnapshotLifecyclePolicy p = new SnapshotLifecyclePolicy("id", "name", "0 1 2 3 4 ? 
2099", "repo", Collections.emptyMap(), + SnapshotRetentionConfiguration.EMPTY); assertThat(p.calculateNextExecution(), equalTo(4078864860000L)); } public void testValidation() { SnapshotLifecyclePolicy policy = new SnapshotLifecyclePolicy("a,b", "", - "* * * * * L", " ", Collections.emptyMap()); + "* * * * * L", " ", Collections.emptyMap(), SnapshotRetentionConfiguration.EMPTY); ValidationException e = policy.validate(); assertThat(e.validationErrors(), @@ -64,7 +72,7 @@ public class SnapshotLifecyclePolicyTests extends AbstractSerializingTestCase", - "1 * * * * ?", "myrepo", configuration); + "1 * * * * ?", "myrepo", configuration, SnapshotRetentionConfiguration.EMPTY); ValidationException e = policy.validate(); assertThat(e.validationErrors(), contains("invalid configuration.metadata [" + metadataString + "]: must be an object if present")); @@ -93,7 +101,7 @@ public class SnapshotLifecyclePolicyTests extends AbstractSerializingTestCase", - "1 * * * * ?", "myrepo", configuration); + "1 * * * * ?", "myrepo", configuration, SnapshotRetentionConfiguration.EMPTY); ValidationException e = policy.validate(); assertThat(e.validationErrors(), contains("invalid configuration.metadata: field name [policy] is reserved and " + "will be added automatically")); @@ -113,7 +121,7 @@ public class SnapshotLifecyclePolicyTests extends AbstractSerializingTestCase", - "1 * * * * ?", "myrepo", configuration); + "1 * * * * ?", "myrepo", configuration, SnapshotRetentionConfiguration.EMPTY); ValidationException e = policy.validate(); assertThat(e.validationErrors(), contains("invalid configuration.metadata: must be smaller than [1004] bytes, but is [" + totalBytes + "] bytes")); @@ -131,54 +139,37 @@ public class SnapshotLifecyclePolicyTests extends AbstractSerializingTestCase config = null; - if (randomBoolean()) { - config = new HashMap<>(); - for (int i = 0; i < randomIntBetween(2, 5); i++) { - config.put(randomAlphaOfLength(4), randomAlphaOfLength(4)); - } - } - return new SnapshotLifecyclePolicy(id, - randomAlphaOfLength(4), - randomSchedule(), - randomAlphaOfLength(4), - config); - } - - private static String randomSchedule() { - return randomIntBetween(0, 59) + " " + - randomIntBetween(0, 59) + " " + - randomIntBetween(0, 12) + " * * ?"; - } - @Override - protected SnapshotLifecyclePolicy mutateInstance(SnapshotLifecyclePolicy instance) throws IOException { - switch (between(0, 4)) { + protected SnapshotLifecyclePolicy mutateInstance(SnapshotLifecyclePolicy instance) { + switch (between(0, 5)) { case 0: return new SnapshotLifecyclePolicy(instance.getId() + randomAlphaOfLength(2), instance.getName(), instance.getSchedule(), instance.getRepository(), - instance.getConfig()); + instance.getConfig(), + instance.getRetentionPolicy()); case 1: return new SnapshotLifecyclePolicy(instance.getId(), instance.getName() + randomAlphaOfLength(2), instance.getSchedule(), instance.getRepository(), - instance.getConfig()); + instance.getConfig(), + instance.getRetentionPolicy()); case 2: return new SnapshotLifecyclePolicy(instance.getId(), instance.getName(), - randomValueOtherThan(instance.getSchedule(), SnapshotLifecyclePolicyTests::randomSchedule), + randomValueOtherThan(instance.getSchedule(), SnapshotLifecyclePolicyMetadataTests::randomSchedule), instance.getRepository(), - instance.getConfig()); + instance.getConfig(), + instance.getRetentionPolicy()); case 3: return new SnapshotLifecyclePolicy(instance.getId(), instance.getName(), instance.getSchedule(), instance.getRepository() + randomAlphaOfLength(2), - 
instance.getConfig()); + instance.getConfig(), + instance.getRetentionPolicy()); case 4: Map newConfig = new HashMap<>(); for (int i = 0; i < randomIntBetween(2, 5); i++) { @@ -188,7 +179,15 @@ public class SnapshotLifecyclePolicyTests extends AbstractSerializingTestCase policies = new HashMap<>(); policies.put(newPolicy.getPolicy().getId(), newPolicy); - ClusterState emptyState = createState(new SnapshotLifecycleMetadata(Collections.emptyMap(), OperationMode.RUNNING)); - ClusterState state = createState(new SnapshotLifecycleMetadata(policies, OperationMode.RUNNING)); + ClusterState emptyState = + createState(new SnapshotLifecycleMetadata(Collections.emptyMap(), OperationMode.RUNNING, new SnapshotLifecycleStats())); + ClusterState state = + createState(new SnapshotLifecycleMetadata(policies, OperationMode.RUNNING, new SnapshotLifecycleStats())); sls.clusterChanged(new ClusterChangedEvent("1", state, emptyState)); @@ -117,13 +121,13 @@ public class SnapshotLifecycleServiceTests extends ESTestCase { sls.onMaster(); assertThat(sls.getScheduler().scheduledJobIds(), equalTo(Collections.singleton("initial-1"))); - state = createState(new SnapshotLifecycleMetadata(policies, OperationMode.STOPPING)); + state = createState(new SnapshotLifecycleMetadata(policies, OperationMode.STOPPING, new SnapshotLifecycleStats())); sls.clusterChanged(new ClusterChangedEvent("2", state, emptyState)); // Since the service is stopping, jobs should have been cancelled assertThat(sls.getScheduler().scheduledJobIds(), equalTo(Collections.emptySet())); - state = createState(new SnapshotLifecycleMetadata(policies, OperationMode.STOPPED)); + state = createState(new SnapshotLifecycleMetadata(policies, OperationMode.STOPPED, new SnapshotLifecycleStats())); sls.clusterChanged(new ClusterChangedEvent("3", state, emptyState)); // Since the service is stopped, jobs should have been cancelled @@ -148,7 +152,8 @@ public class SnapshotLifecycleServiceTests extends ESTestCase { () -> new FakeSnapshotTask(e -> trigger.get().accept(e)), clusterService, clock)) { sls.offMaster(); - SnapshotLifecycleMetadata snapMeta = new SnapshotLifecycleMetadata(Collections.emptyMap(), OperationMode.RUNNING); + SnapshotLifecycleMetadata snapMeta = + new SnapshotLifecycleMetadata(Collections.emptyMap(), OperationMode.RUNNING, new SnapshotLifecycleStats()); ClusterState previousState = createState(snapMeta); Map policies = new HashMap<>(); @@ -158,7 +163,7 @@ public class SnapshotLifecycleServiceTests extends ESTestCase { .setModifiedDate(1) .build(); policies.put(policy.getPolicy().getId(), policy); - snapMeta = new SnapshotLifecycleMetadata(policies, OperationMode.RUNNING); + snapMeta = new SnapshotLifecycleMetadata(policies, OperationMode.RUNNING, new SnapshotLifecycleStats()); ClusterState state = createState(snapMeta); ClusterChangedEvent event = new ClusterChangedEvent("1", state, previousState); trigger.set(e -> { @@ -187,7 +192,7 @@ public class SnapshotLifecycleServiceTests extends ESTestCase { .setModifiedDate(2) .build(); policies.put(policy.getPolicy().getId(), newPolicy); - state = createState(new SnapshotLifecycleMetadata(policies, OperationMode.RUNNING)); + state = createState(new SnapshotLifecycleMetadata(policies, OperationMode.RUNNING, new SnapshotLifecycleStats())); event = new ClusterChangedEvent("2", state, previousState); sls.clusterChanged(event); assertThat(sls.getScheduler().scheduledJobIds(), equalTo(Collections.singleton("foo-2"))); @@ -204,7 +209,8 @@ public class SnapshotLifecycleServiceTests extends ESTestCase { final int 
currentCount2 = triggerCount.get(); previousState = state; // Create a state simulating the policy being deleted - state = createState(new SnapshotLifecycleMetadata(Collections.emptyMap(), OperationMode.RUNNING)); + state = + createState(new SnapshotLifecycleMetadata(Collections.emptyMap(), OperationMode.RUNNING, new SnapshotLifecycleStats())); event = new ClusterChangedEvent("2", state, previousState); sls.clusterChanged(event); clock.fastForwardSeconds(2); @@ -221,7 +227,7 @@ public class SnapshotLifecycleServiceTests extends ESTestCase { .setModifiedDate(1) .build(); policies.put(policy.getPolicy().getId(), policy); - snapMeta = new SnapshotLifecycleMetadata(policies, OperationMode.RUNNING); + snapMeta = new SnapshotLifecycleMetadata(policies, OperationMode.RUNNING, new SnapshotLifecycleStats()); previousState = state; state = createState(snapMeta); event = new ClusterChangedEvent("1", state, previousState); @@ -254,7 +260,8 @@ public class SnapshotLifecycleServiceTests extends ESTestCase { () -> new FakeSnapshotTask(e -> trigger.get().accept(e)), clusterService, clock)) { sls.onMaster(); - SnapshotLifecycleMetadata snapMeta = new SnapshotLifecycleMetadata(Collections.emptyMap(), OperationMode.RUNNING); + SnapshotLifecycleMetadata snapMeta = + new SnapshotLifecycleMetadata(Collections.emptyMap(), OperationMode.RUNNING, new SnapshotLifecycleStats()); ClusterState previousState = createState(snapMeta); Map policies = new HashMap<>(); @@ -265,7 +272,7 @@ public class SnapshotLifecycleServiceTests extends ESTestCase { .setModifiedDate(1) .build(); policies.put(policy.getPolicy().getId(), policy); - snapMeta = new SnapshotLifecycleMetadata(policies, OperationMode.RUNNING); + snapMeta = new SnapshotLifecycleMetadata(policies, OperationMode.RUNNING, new SnapshotLifecycleStats()); ClusterState state = createState(snapMeta); ClusterChangedEvent event = new ClusterChangedEvent("1", state, previousState); sls.clusterChanged(event); @@ -280,7 +287,7 @@ public class SnapshotLifecycleServiceTests extends ESTestCase { .setModifiedDate(1) .build(); policies.put(secondPolicy.getPolicy().getId(), secondPolicy); - snapMeta = new SnapshotLifecycleMetadata(policies, OperationMode.RUNNING); + snapMeta = new SnapshotLifecycleMetadata(policies, OperationMode.RUNNING, new SnapshotLifecycleStats()); state = createState(snapMeta); event = new ClusterChangedEvent("2", state, previousState); sls.clusterChanged(event); @@ -329,10 +336,11 @@ public class SnapshotLifecycleServiceTests extends ESTestCase { indices.add("foo-*"); indices.add(randomAlphaOfLength(4)); config.put("indices", indices); - return new SnapshotLifecyclePolicy(id, randomAlphaOfLength(4), schedule, randomAlphaOfLength(4), config); + return new SnapshotLifecyclePolicy(id, randomAlphaOfLength(4), schedule, randomAlphaOfLength(4), config, + SnapshotRetentionConfiguration.EMPTY); } - private static String randomSchedule() { + public static String randomSchedule() { return randomIntBetween(0, 59) + " " + randomIntBetween(0, 59) + " " + randomIntBetween(0, 12) + " * * ?"; diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTaskTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTaskTests.java index 65897c7e1ee..84c1d12cce6 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTaskTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTaskTests.java @@ -56,7 +56,8 @@ public class SnapshotLifecycleTaskTests 
extends ESTestCase { public void testGetSnapMetadata() { final String id = randomAlphaOfLength(4); final SnapshotLifecyclePolicyMetadata slpm = makePolicyMeta(id); - final SnapshotLifecycleMetadata meta = new SnapshotLifecycleMetadata(Collections.singletonMap(id, slpm), OperationMode.RUNNING); + final SnapshotLifecycleMetadata meta = + new SnapshotLifecycleMetadata(Collections.singletonMap(id, slpm), OperationMode.RUNNING, new SnapshotLifecycleStats()); final ClusterState state = ClusterState.builder(new ClusterName("test")) .metaData(MetaData.builder() @@ -76,7 +77,8 @@ public class SnapshotLifecycleTaskTests extends ESTestCase { public void testSkipCreatingSnapshotWhenJobDoesNotMatch() { final String id = randomAlphaOfLength(4); final SnapshotLifecyclePolicyMetadata slpm = makePolicyMeta(id); - final SnapshotLifecycleMetadata meta = new SnapshotLifecycleMetadata(Collections.singletonMap(id, slpm), OperationMode.RUNNING); + final SnapshotLifecycleMetadata meta = + new SnapshotLifecycleMetadata(Collections.singletonMap(id, slpm), OperationMode.RUNNING, new SnapshotLifecycleStats()); final ClusterState state = ClusterState.builder(new ClusterName("test")) .metaData(MetaData.builder() @@ -106,7 +108,8 @@ public class SnapshotLifecycleTaskTests extends ESTestCase { public void testCreateSnapshotOnTrigger() { final String id = randomAlphaOfLength(4); final SnapshotLifecyclePolicyMetadata slpm = makePolicyMeta(id); - final SnapshotLifecycleMetadata meta = new SnapshotLifecycleMetadata(Collections.singletonMap(id, slpm), OperationMode.RUNNING); + final SnapshotLifecycleMetadata meta = + new SnapshotLifecycleMetadata(Collections.singletonMap(id, slpm), OperationMode.RUNNING, new SnapshotLifecycleStats()); final ClusterState state = ClusterState.builder(new ClusterName("test")) .metaData(MetaData.builder() diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/slm/SnapshotRetentionServiceTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/slm/SnapshotRetentionServiceTests.java new file mode 100644 index 00000000000..a46b8632048 --- /dev/null +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/slm/SnapshotRetentionServiceTests.java @@ -0,0 +1,87 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.slm; + +import org.elasticsearch.Version; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodeRole; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ClusterServiceUtils; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.ilm.LifecycleSettings; +import org.elasticsearch.xpack.core.scheduler.SchedulerEngine; +import org.elasticsearch.xpack.core.slm.history.SnapshotHistoryStore; +import org.elasticsearch.xpack.core.watcher.watch.ClockMock; + +import java.util.Collections; +import java.util.HashSet; +import java.util.Set; + +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.equalTo; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class SnapshotRetentionServiceTests extends ESTestCase { + + private static final ClusterSettings clusterSettings; + static { + Set> internalSettings = new HashSet<>(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + internalSettings.add(LifecycleSettings.SLM_RETENTION_SCHEDULE_SETTING); + clusterSettings = new ClusterSettings(Settings.EMPTY, internalSettings); + } + + public void testJobsAreScheduled() { + final DiscoveryNode discoveryNode = new DiscoveryNode("node", ESTestCase.buildNewFakeTransportAddress(), + Collections.emptyMap(), DiscoveryNodeRole.BUILT_IN_ROLES, Version.CURRENT); + ClockMock clock = new ClockMock(); + + try (ThreadPool threadPool = new TestThreadPool("test"); + ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool, discoveryNode, clusterSettings); + SnapshotRetentionService service = new SnapshotRetentionService(Settings.EMPTY, + FakeRetentionTask::new, clusterService, clock)) { + assertThat(service.getScheduler().jobCount(), equalTo(0)); + + service.onMaster(); + service.setUpdateSchedule(SnapshotLifecycleServiceTests.randomSchedule()); + assertThat(service.getScheduler().scheduledJobIds(), containsInAnyOrder(SnapshotRetentionService.SLM_RETENTION_JOB_ID)); + + service.offMaster(); + assertThat(service.getScheduler().jobCount(), equalTo(0)); + + service.onMaster(); + assertThat(service.getScheduler().scheduledJobIds(), containsInAnyOrder(SnapshotRetentionService.SLM_RETENTION_JOB_ID)); + + service.setUpdateSchedule(""); + assertThat(service.getScheduler().jobCount(), equalTo(0)); + threadPool.shutdownNow(); + } + } + + private static class FakeRetentionTask extends SnapshotRetentionTask { + FakeRetentionTask() { + super(fakeClient(), null, System::nanoTime, mock(SnapshotHistoryStore.class), mock(ThreadPool.class)); + } + + @Override + public void triggered(SchedulerEngine.Event event) { + super.triggered(event); + } + } + + private static Client fakeClient() { + Client c = mock(Client.class); + when(c.settings()).thenReturn(Settings.EMPTY); + return c; + } +} diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/slm/SnapshotRetentionTaskTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/slm/SnapshotRetentionTaskTests.java new file mode 100644 index 00000000000..53c85c5e230 --- /dev/null +++ 
b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/slm/SnapshotRetentionTaskTests.java @@ -0,0 +1,463 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.slm; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.RepositoryCleanupInProgress; +import org.elasticsearch.cluster.RestoreInProgress; +import org.elasticsearch.cluster.SnapshotDeletionsInProgress; +import org.elasticsearch.cluster.SnapshotsInProgress; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.repositories.IndexId; +import org.elasticsearch.snapshots.Snapshot; +import org.elasticsearch.snapshots.SnapshotId; +import org.elasticsearch.snapshots.SnapshotInfo; +import org.elasticsearch.test.ClusterServiceUtils; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.client.NoOpClient; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.ilm.LifecycleSettings; +import org.elasticsearch.xpack.core.ilm.OperationMode; +import org.elasticsearch.xpack.core.scheduler.SchedulerEngine; +import org.elasticsearch.xpack.core.slm.SnapshotLifecycleMetadata; +import org.elasticsearch.xpack.core.slm.SnapshotLifecyclePolicy; +import org.elasticsearch.xpack.core.slm.SnapshotLifecyclePolicyMetadata; +import org.elasticsearch.xpack.core.slm.SnapshotRetentionConfiguration; +import org.elasticsearch.xpack.core.slm.history.SnapshotHistoryStore; + +import java.time.ZoneOffset; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicLong; +import java.util.function.Consumer; +import java.util.function.Function; +import java.util.function.LongSupplier; +import java.util.function.Supplier; +import java.util.stream.Collectors; + +import static org.elasticsearch.xpack.core.slm.history.SnapshotHistoryItem.DELETE_OPERATION; +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.not; +import static org.mockito.Mockito.mock; + +public class SnapshotRetentionTaskTests extends ESTestCase { + + public void testGetAllPoliciesWithRetentionEnabled() { + SnapshotLifecyclePolicy policyWithout = new SnapshotLifecyclePolicy("policyWithout", "snap", "1 * * * * ?", + "repo", null, SnapshotRetentionConfiguration.EMPTY); + SnapshotLifecyclePolicy 
policyWithout2 = new SnapshotLifecyclePolicy("policyWithout2", "snap", "1 * * * * ?", + "repo", null, new SnapshotRetentionConfiguration(null, null, null)); + SnapshotLifecyclePolicy policyWith = new SnapshotLifecyclePolicy("policyWith", "snap", "1 * * * * ?", + "repo", null, new SnapshotRetentionConfiguration(TimeValue.timeValueDays(30), null, null)); + + // Test with no SLM metadata + ClusterState state = ClusterState.builder(new ClusterName("cluster")).build(); + assertThat(SnapshotRetentionTask.getAllPoliciesWithRetentionEnabled(state), equalTo(Collections.emptyMap())); + + // Test with empty SLM metadata + MetaData metaData = MetaData.builder() + .putCustom(SnapshotLifecycleMetadata.TYPE, + new SnapshotLifecycleMetadata(Collections.emptyMap(), OperationMode.RUNNING, new SnapshotLifecycleStats())) + .build(); + state = ClusterState.builder(new ClusterName("cluster")).metaData(metaData).build(); + assertThat(SnapshotRetentionTask.getAllPoliciesWithRetentionEnabled(state), equalTo(Collections.emptyMap())); + + // Test with metadata containing only a policy without retention + state = createState(policyWithout); + assertThat(SnapshotRetentionTask.getAllPoliciesWithRetentionEnabled(state), equalTo(Collections.emptyMap())); + + // Test with metadata containing a couple of policies + state = createState(policyWithout, policyWithout2, policyWith); + Map policyMap = SnapshotRetentionTask.getAllPoliciesWithRetentionEnabled(state); + assertThat(policyMap.size(), equalTo(1)); + assertThat(policyMap.get("policyWith"), equalTo(policyWith)); + } + + public void testSnapshotEligibleForDeletion() { + SnapshotLifecyclePolicy policy = new SnapshotLifecyclePolicy("policy", "snap", "1 * * * * ?", + "repo", null, new SnapshotRetentionConfiguration(TimeValue.timeValueDays(30), null, null)); + SnapshotLifecyclePolicy policyWithNoRetention = new SnapshotLifecyclePolicy("policy", "snap", "1 * * * * ?", + "repo", null, randomBoolean() ? 
null : SnapshotRetentionConfiguration.EMPTY); + Map<String, SnapshotLifecyclePolicy> policyMap = Collections.singletonMap("policy", policy); + Map<String, SnapshotLifecyclePolicy> policyWithNoRetentionMap = Collections.singletonMap("policy", policyWithNoRetention); + Function<SnapshotInfo, Map<String, List<SnapshotInfo>>> mkInfos = i -> + Collections.singletonMap("repo", Collections.singletonList(i)); + + // Test when user metadata is null + SnapshotInfo info = new SnapshotInfo(new SnapshotId("name", "uuid"), Collections.singletonList("index"), + 0L, null, 1L, 1, Collections.emptyList(), true, null); + assertThat(SnapshotRetentionTask.snapshotEligibleForDeletion(info, mkInfos.apply(info), policyMap), equalTo(false)); + + // Test when no retention is configured + info = new SnapshotInfo(new SnapshotId("name", "uuid"), Collections.singletonList("index"), + 0L, null, 1L, 1, Collections.emptyList(), true, null); + assertThat(SnapshotRetentionTask.snapshotEligibleForDeletion(info, mkInfos.apply(info), policyWithNoRetentionMap), equalTo(false)); + + // Test when user metadata is a map that doesn't contain "policy" + info = new SnapshotInfo(new SnapshotId("name", "uuid"), Collections.singletonList("index"), + 0L, null, 1L, 1, Collections.emptyList(), true, Collections.singletonMap("foo", "bar")); + assertThat(SnapshotRetentionTask.snapshotEligibleForDeletion(info, mkInfos.apply(info), policyMap), equalTo(false)); + + // Test with an ancient snapshot that should be expunged + info = new SnapshotInfo(new SnapshotId("name", "uuid"), Collections.singletonList("index"), + 0L, null, 1L, 1, Collections.emptyList(), true, Collections.singletonMap("policy", "policy")); + assertThat(SnapshotRetentionTask.snapshotEligibleForDeletion(info, mkInfos.apply(info), policyMap), equalTo(true)); + + // Test with a snapshot whose start date is old enough to be expunged (but the finish date is not) + long time = System.currentTimeMillis() - TimeValue.timeValueDays(30).millis() - 1; + info = new SnapshotInfo(new SnapshotId("name", "uuid"), Collections.singletonList("index"), + time, null, time + TimeValue.timeValueDays(4).millis(), 1, Collections.emptyList(), + true, Collections.singletonMap("policy", "policy")); + assertThat(SnapshotRetentionTask.snapshotEligibleForDeletion(info, mkInfos.apply(info), policyMap), equalTo(true)); + + // Test with a fresh snapshot that should not be expunged + info = new SnapshotInfo(new SnapshotId("name", "uuid"), Collections.singletonList("index"), + System.currentTimeMillis(), null, System.currentTimeMillis() + 1, + 1, Collections.emptyList(), true, Collections.singletonMap("policy", "policy")); + assertThat(SnapshotRetentionTask.snapshotEligibleForDeletion(info, mkInfos.apply(info), policyMap), equalTo(false)); + } + + public void testRetentionTaskSuccess() throws Exception { + retentionTaskTest(true); + } + + public void testRetentionTaskFailure() throws Exception { + retentionTaskTest(false); + } + + private void retentionTaskTest(final boolean deletionSuccess) throws Exception { + try (ThreadPool threadPool = new TestThreadPool("slm-test"); + ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool); + Client noOpClient = new NoOpClient("slm-test")) { + + final String policyId = "policy"; + final String repoId = "repo"; + SnapshotLifecyclePolicy policy = new SnapshotLifecyclePolicy(policyId, "snap", "1 * * * * ?", + repoId, null, new SnapshotRetentionConfiguration(TimeValue.timeValueDays(30), null, null)); + + ClusterState state = createState(policy); + ClusterServiceUtils.setState(clusterService, state); + + final SnapshotInfo eligibleSnapshot = new
SnapshotInfo(new SnapshotId("name", "uuid"), Collections.singletonList("index"), + 0L, null, 1L, 1, Collections.emptyList(), true, Collections.singletonMap("policy", policyId)); + final SnapshotInfo ineligibleSnapshot = new SnapshotInfo(new SnapshotId("name2", "uuid2"), Collections.singletonList("index"), + System.currentTimeMillis(), null, System.currentTimeMillis() + 1, 1, + Collections.emptyList(), true, Collections.singletonMap("policy", policyId)); + + Set deleted = ConcurrentHashMap.newKeySet(); + Set deletedSnapshotsInHistory = ConcurrentHashMap.newKeySet(); + CountDownLatch deletionLatch = new CountDownLatch(1); + CountDownLatch historyLatch = new CountDownLatch(1); + + MockSnapshotRetentionTask retentionTask = new MockSnapshotRetentionTask(noOpClient, clusterService, + new SnapshotLifecycleTaskTests.VerifyingHistoryStore(noOpClient, ZoneOffset.UTC, + (historyItem) -> { + assertEquals(deletionSuccess, historyItem.isSuccess()); + if (historyItem.isSuccess() == false) { + assertThat(historyItem.getErrorDetails(), containsString("deletion_failed")); + } + assertEquals(policyId, historyItem.getPolicyId()); + assertEquals(repoId, historyItem.getRepository()); + assertEquals(DELETE_OPERATION, historyItem.getOperation()); + deletedSnapshotsInHistory.add(historyItem.getSnapshotName()); + historyLatch.countDown(); + }), + threadPool, + () -> { + List snaps = new ArrayList<>(2); + snaps.add(eligibleSnapshot); + snaps.add(ineligibleSnapshot); + logger.info("--> retrieving snapshots [{}]", snaps); + return Collections.singletonMap(repoId, snaps); + }, + (deletionPolicyId, repo, snapId, slmStats, listener) -> { + logger.info("--> deleting {} from repo {}", snapId, repo); + deleted.add(snapId); + if (deletionSuccess) { + listener.onResponse(new AcknowledgedResponse(true)); + } else { + listener.onFailure(new RuntimeException("deletion_failed")); + } + deletionLatch.countDown(); + }, + System::nanoTime); + + long time = System.currentTimeMillis(); + retentionTask.triggered(new SchedulerEngine.Event(SnapshotRetentionService.SLM_RETENTION_JOB_ID, time, time)); + + deletionLatch.await(10, TimeUnit.SECONDS); + + assertThat("something should have been deleted", deleted, not(empty())); + assertThat("one snapshot should have been deleted", deleted, hasSize(1)); + assertThat(deleted, contains(eligibleSnapshot.snapshotId())); + + boolean historySuccess = historyLatch.await(10, TimeUnit.SECONDS); + assertThat("expected history entries for 1 snapshot deletions", historySuccess, equalTo(true)); + assertThat(deletedSnapshotsInHistory, contains(eligibleSnapshot.snapshotId().getName())); + + threadPool.shutdownNow(); + threadPool.awaitTermination(10, TimeUnit.SECONDS); + } + } + + public void testSuccessfulTimeBoundedDeletion() throws Exception { + timeBoundedDeletion(true); + } + + public void testFailureTimeBoundedDeletion() throws Exception { + timeBoundedDeletion(false); + } + + private void timeBoundedDeletion(final boolean deletionSuccess) throws Exception { + try (ThreadPool threadPool = new TestThreadPool("slm-test"); + ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool); + Client noOpClient = new NoOpClient("slm-test")) { + + final String policyId = "policy"; + final String repoId = "repo"; + SnapshotLifecyclePolicy policy = new SnapshotLifecyclePolicy(policyId, "snap", "1 * * * * ?", + repoId, null, new SnapshotRetentionConfiguration(null, null, 1)); + + ClusterState state = createState(policy); + state = ClusterState.builder(state) + 
.metaData(MetaData.builder(state.metaData()) + .transientSettings(Settings.builder() + .put(LifecycleSettings.SLM_RETENTION_DURATION, "500ms") + .build())).build(); + ClusterServiceUtils.setState(clusterService, state); + + final SnapshotInfo snap1 = new SnapshotInfo(new SnapshotId("name1", "uuid1"), Collections.singletonList("index"), + 0L, null, 1L, 1, Collections.emptyList(), true, Collections.singletonMap("policy", policyId)); + final SnapshotInfo snap2 = new SnapshotInfo(new SnapshotId("name2", "uuid2"), Collections.singletonList("index"), + 1L, null, 2L, 1, Collections.emptyList(), true, Collections.singletonMap("policy", policyId)); + final SnapshotInfo snap3 = new SnapshotInfo(new SnapshotId("name3", "uuid3"), Collections.singletonList("index"), + 2L, null, 3L, 1, Collections.emptyList(), true, Collections.singletonMap("policy", policyId)); + final SnapshotInfo snap4 = new SnapshotInfo(new SnapshotId("name4", "uuid4"), Collections.singletonList("index"), + 3L, null, 4L, 1, Collections.emptyList(), true, Collections.singletonMap("policy", policyId)); + final SnapshotInfo snap5 = new SnapshotInfo(new SnapshotId("name5", "uuid5"), Collections.singletonList("index"), + 4L, null, 5L, 1, Collections.emptyList(), true, Collections.singletonMap("policy", policyId)); + + final Set<SnapshotId> deleted = ConcurrentHashMap.newKeySet(); + // We're expecting two deletions before they hit the "taken too long" test, so have a latch of 2 + CountDownLatch deletionLatch = new CountDownLatch(2); + CountDownLatch historyLatch = new CountDownLatch(2); + Set<String> deletedSnapshotsInHistory = ConcurrentHashMap.newKeySet(); + AtomicLong nanos = new AtomicLong(System.nanoTime()); + MockSnapshotRetentionTask retentionTask = new MockSnapshotRetentionTask(noOpClient, clusterService, + new SnapshotLifecycleTaskTests.VerifyingHistoryStore(noOpClient, ZoneOffset.UTC, + (historyItem) -> { + assertEquals(deletionSuccess, historyItem.isSuccess()); + if (historyItem.isSuccess() == false) { + assertThat(historyItem.getErrorDetails(), containsString("deletion_failed")); + } + assertEquals(policyId, historyItem.getPolicyId()); + assertEquals(repoId, historyItem.getRepository()); + assertEquals(DELETE_OPERATION, historyItem.getOperation()); + deletedSnapshotsInHistory.add(historyItem.getSnapshotName()); + historyLatch.countDown(); + }), + threadPool, + () -> { + List<SnapshotInfo> snaps = Arrays.asList(snap1, snap2, snap3, snap4, snap5); + logger.info("--> retrieving snapshots [{}]", snaps); + return Collections.singletonMap(repoId, snaps); + }, + (deletionPolicyId, repo, snapId, slmStats, listener) -> { + logger.info("--> deleting {}", snapId); + // Don't pause until snapshot 2 + if (snapId.equals(snap2.snapshotId())) { + logger.info("--> pausing for 501ms while deleting snap2 to simulate deletion past a threshold"); + nanos.addAndGet(TimeValue.timeValueMillis(501).nanos()); + } + deleted.add(snapId); + if (deletionSuccess) { + listener.onResponse(new AcknowledgedResponse(true)); + } else { + listener.onFailure(new RuntimeException("deletion_failed")); + } + deletionLatch.countDown(); + }, + nanos::get); + + long time = System.currentTimeMillis(); + retentionTask.triggered(new SchedulerEngine.Event(SnapshotRetentionService.SLM_RETENTION_JOB_ID, time, time)); + + boolean success = deletionLatch.await(10, TimeUnit.SECONDS); + + assertThat("expected 2 snapshot deletions within 10 seconds, deleted: " + deleted, success, equalTo(true)); + + assertNotNull("something should have been deleted", deleted); + assertThat("two snapshots should have been deleted",
deleted.size(), equalTo(2)); + assertThat(deleted, containsInAnyOrder(snap1.snapshotId(), snap2.snapshotId())); + + boolean historySuccess = historyLatch.await(10, TimeUnit.SECONDS); + assertThat("expected history entries for 2 snapshot deletions", historySuccess, equalTo(true)); + assertThat(deletedSnapshotsInHistory, containsInAnyOrder(snap1.snapshotId().getName(), snap2.snapshotId().getName())); + + threadPool.shutdownNow(); + threadPool.awaitTermination(10, TimeUnit.SECONDS); + } + } + + public void testOkToDeleteSnapshots() { + final Snapshot snapshot = new Snapshot("repo", new SnapshotId("name", "uuid")); + + SnapshotsInProgress inProgress = new SnapshotsInProgress( + new SnapshotsInProgress.Entry( + snapshot, true, false, SnapshotsInProgress.State.INIT, + Collections.singletonList(new IndexId("name", "id")), 0, 0, + ImmutableOpenMap.builder().build(), Collections.emptyMap())); + ClusterState state = ClusterState.builder(new ClusterName("cluster")) + .putCustom(SnapshotsInProgress.TYPE, inProgress) + .build(); + + assertThat(SnapshotRetentionTask.okayToDeleteSnapshots(state), equalTo(false)); + + SnapshotDeletionsInProgress delInProgress = new SnapshotDeletionsInProgress( + Collections.singletonList(new SnapshotDeletionsInProgress.Entry(snapshot, 0, 0))); + state = ClusterState.builder(new ClusterName("cluster")) + .putCustom(SnapshotDeletionsInProgress.TYPE, delInProgress) + .build(); + + assertThat(SnapshotRetentionTask.okayToDeleteSnapshots(state), equalTo(false)); + + RepositoryCleanupInProgress cleanupInProgress = new RepositoryCleanupInProgress(new RepositoryCleanupInProgress.Entry("repo", 0)); + state = ClusterState.builder(new ClusterName("cluster")) + .putCustom(RepositoryCleanupInProgress.TYPE, cleanupInProgress) + .build(); + + assertThat(SnapshotRetentionTask.okayToDeleteSnapshots(state), equalTo(false)); + + RestoreInProgress restoreInProgress = mock(RestoreInProgress.class); + state = ClusterState.builder(new ClusterName("cluster")) + .putCustom(RestoreInProgress.TYPE, restoreInProgress) + .build(); + + assertThat(SnapshotRetentionTask.okayToDeleteSnapshots(state), equalTo(false)); + } + + public void testSkipWhileStopping() throws Exception { + doTestSkipDuringMode(OperationMode.STOPPING); + } + + public void testSkipWhileStopped() throws Exception { + doTestSkipDuringMode(OperationMode.STOPPED); + } + + private void doTestSkipDuringMode(OperationMode mode) throws Exception { + try (ThreadPool threadPool = new TestThreadPool("slm-test"); + ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool); + Client noOpClient = new NoOpClient("slm-test")) { + final String policyId = "policy"; + final String repoId = "repo"; + SnapshotLifecyclePolicy policy = new SnapshotLifecyclePolicy(policyId, "snap", "1 * * * * ?", + repoId, null, new SnapshotRetentionConfiguration(TimeValue.timeValueDays(30), null, null)); + + ClusterState state = createState(mode, policy); + ClusterServiceUtils.setState(clusterService, state); + + SnapshotRetentionTask task = new MockSnapshotRetentionTask(noOpClient, clusterService, + new SnapshotLifecycleTaskTests.VerifyingHistoryStore(noOpClient, ZoneOffset.UTC, + (historyItem) -> fail("should never write history")), + threadPool, + () -> { + fail("should not retrieve snapshots"); + return null; + }, + (a, b, c, d, e) -> fail("should not delete snapshots"), + System::nanoTime); + + long time = System.currentTimeMillis(); + task.triggered(new SchedulerEngine.Event(SnapshotRetentionService.SLM_RETENTION_JOB_ID, time, time)); + + 
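The time-bounded deletion test above works by injecting the nanosecond clock as a LongSupplier (nanos::get) and advancing it artificially while snap2 is being deleted, so the retention task observes that the 500ms LifecycleSettings.SLM_RETENTION_DURATION budget has been spent without any real waiting. A minimal, self-contained sketch of that pattern follows; the class and method names are illustrative and are not the actual SnapshotRetentionTask internals.

import java.util.ArrayList;
import java.util.List;
import java.util.function.Consumer;
import java.util.function.LongSupplier;

// Illustrative sketch only: delete eligible snapshots in order, but stop as soon as the
// injected clock reports that the time budget for this retention run has been used up.
class TimeBoundedDeletionSketch {
    private final LongSupplier nanoTimeSupplier; // injected so a test can advance time artificially
    private final long budgetNanos;              // the time budget, e.g. the 500ms retention duration set in the test

    TimeBoundedDeletionSketch(LongSupplier nanoTimeSupplier, long budgetNanos) {
        this.nanoTimeSupplier = nanoTimeSupplier;
        this.budgetNanos = budgetNanos;
    }

    /** Returns the snapshots that were actually handed to the deleter before the budget ran out. */
    List<String> deleteWithinBudget(List<String> eligibleSnapshots, Consumer<String> deleteOne) {
        long start = nanoTimeSupplier.getAsLong();
        List<String> attempted = new ArrayList<>();
        for (String snapshot : eligibleSnapshots) {
            if (nanoTimeSupplier.getAsLong() - start > budgetNanos) {
                break; // budget exceeded: leave the rest for the next scheduled retention run
            }
            deleteOne.accept(snapshot);
            attempted.add(snapshot);
        }
        return attempted;
    }
}

With the test's clock, the budget check passes before snap1 and snap2 (the 501ms jump happens only while snap2 is being deleted) and fails before snap3, which is why the deletion latch expects exactly two deletions.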
threadPool.shutdownNow(); + threadPool.awaitTermination(10, TimeUnit.SECONDS); + } + } + + public ClusterState createState(SnapshotLifecyclePolicy... policies) { + return createState(OperationMode.RUNNING, policies); + } + + public ClusterState createState(OperationMode mode, SnapshotLifecyclePolicy... policies) { + Map policyMetadataMap = Arrays.stream(policies) + .map(policy -> SnapshotLifecyclePolicyMetadata.builder() + .setPolicy(policy) + .setHeaders(Collections.emptyMap()) + .setModifiedDate(randomNonNegativeLong()) + .setVersion(randomNonNegativeLong()) + .build()) + .collect(Collectors.toMap(pm -> pm.getPolicy().getId(), pm -> pm)); + + MetaData metaData = MetaData.builder() + .putCustom(SnapshotLifecycleMetadata.TYPE, + new SnapshotLifecycleMetadata(policyMetadataMap, mode, new SnapshotLifecycleStats())) + .build(); + return ClusterState.builder(new ClusterName("cluster")) + .metaData(metaData) + .build(); + } + + private static class MockSnapshotRetentionTask extends SnapshotRetentionTask { + private final Supplier>> snapshotRetriever; + private final DeleteSnapshotMock deleteRunner; + + MockSnapshotRetentionTask(Client client, + ClusterService clusterService, + SnapshotHistoryStore historyStore, + ThreadPool threadPool, + Supplier>> snapshotRetriever, + DeleteSnapshotMock deleteRunner, + LongSupplier nanoSupplier) { + super(client, clusterService, nanoSupplier, historyStore, threadPool); + this.snapshotRetriever = snapshotRetriever; + this.deleteRunner = deleteRunner; + } + + @Override + void getAllSuccessfulSnapshots(Collection repositories, + ActionListener>> listener, + Consumer errorHandler) { + listener.onResponse(this.snapshotRetriever.get()); + } + + @Override + void deleteSnapshot(String policyId, String repo, SnapshotId snapshot, SnapshotLifecycleStats slmStats, + ActionListener listener) { + deleteRunner.apply(policyId, repo, snapshot, slmStats, listener); + } + } + + @FunctionalInterface + interface DeleteSnapshotMock { + void apply(String policyId, String repo, SnapshotId snapshot, SnapshotLifecycleStats slmStats, + ActionListener listener); + } +} diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DeleteExpiredDataIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DeleteExpiredDataIT.java index eb89c15e146..5753e8acc20 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DeleteExpiredDataIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DeleteExpiredDataIT.java @@ -29,6 +29,7 @@ import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSnapshot; import org.elasticsearch.xpack.core.ml.job.results.Bucket; import org.elasticsearch.xpack.core.ml.job.results.ForecastRequestStats; +import org.elasticsearch.xpack.core.ml.notifications.AuditorField; import org.junit.After; import org.junit.Before; @@ -184,7 +185,8 @@ public class DeleteExpiredDataIT extends MlNativeAutodetectIntegTestCase { long totalModelSizeStatsBeforeDelete = client().prepareSearch("*") .setQuery(QueryBuilders.termQuery("result_type", "model_size_stats")) .get().getHits().getTotalHits().value; - long totalNotificationsCountBeforeDelete = client().prepareSearch(".ml-notifications").get().getHits().getTotalHits().value; + long totalNotificationsCountBeforeDelete = + 
client().prepareSearch(AuditorField.NOTIFICATIONS_INDEX).get().getHits().getTotalHits().value; assertThat(totalModelSizeStatsBeforeDelete, greaterThan(0L)); assertThat(totalNotificationsCountBeforeDelete, greaterThan(0L)); @@ -234,7 +236,8 @@ public class DeleteExpiredDataIT extends MlNativeAutodetectIntegTestCase { long totalModelSizeStatsAfterDelete = client().prepareSearch("*") .setQuery(QueryBuilders.termQuery("result_type", "model_size_stats")) .get().getHits().getTotalHits().value; - long totalNotificationsCountAfterDelete = client().prepareSearch(".ml-notifications").get().getHits().getTotalHits().value; + long totalNotificationsCountAfterDelete = + client().prepareSearch(AuditorField.NOTIFICATIONS_INDEX).get().getHits().getTotalHits().value; assertThat(totalModelSizeStatsAfterDelete, equalTo(totalModelSizeStatsBeforeDelete)); assertThat(totalNotificationsCountAfterDelete, greaterThanOrEqualTo(totalNotificationsCountBeforeDelete)); diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DetectionRulesIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DetectionRulesIT.java index 7f018f967fb..fb1a4a6f004 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DetectionRulesIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DetectionRulesIT.java @@ -23,6 +23,7 @@ import org.elasticsearch.xpack.core.ml.job.config.Operator; import org.elasticsearch.xpack.core.ml.job.config.RuleCondition; import org.elasticsearch.xpack.core.ml.job.config.RuleScope; import org.elasticsearch.xpack.core.ml.job.results.AnomalyRecord; +import org.elasticsearch.xpack.core.ml.notifications.AuditorField; import org.junit.After; import java.io.IOException; @@ -186,7 +187,8 @@ public class DetectionRulesIT extends MlNativeAutodetectIntegTestCase { // Wait until the notification that the filter was updated is indexed assertBusy(() -> { - SearchResponse searchResponse = client().prepareSearch(".ml-notifications") + SearchResponse searchResponse = + client().prepareSearch(AuditorField.NOTIFICATIONS_INDEX) .setSize(1) .addSort("timestamp", SortOrder.DESC) .setQuery(QueryBuilders.boolQuery() diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlNativeDataFrameAnalyticsIntegTestCase.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlNativeDataFrameAnalyticsIntegTestCase.java index 7e9ea18a1b3..ee40b6f494a 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlNativeDataFrameAnalyticsIntegTestCase.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlNativeDataFrameAnalyticsIntegTestCase.java @@ -5,11 +5,18 @@ */ package org.elasticsearch.xpack.ml.integration; +import org.elasticsearch.action.admin.indices.refresh.RefreshAction; +import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; +import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; +import org.elasticsearch.action.search.SearchAction; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.master.AcknowledgedResponse; import 
org.elasticsearch.common.Nullable; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.xpack.core.ml.action.DeleteDataFrameAnalyticsAction; import org.elasticsearch.xpack.core.ml.action.GetDataFrameAnalyticsAction; import org.elasticsearch.xpack.core.ml.action.GetDataFrameAnalyticsStatsAction; @@ -23,15 +30,19 @@ import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsState; import org.elasticsearch.xpack.core.ml.dataframe.analyses.OutlierDetection; import org.elasticsearch.xpack.core.ml.dataframe.analyses.Regression; import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; +import org.elasticsearch.xpack.core.ml.notifications.AuditorField; import org.elasticsearch.xpack.core.ml.utils.PhaseProgress; import org.elasticsearch.xpack.ml.dataframe.DataFrameAnalyticsTask; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; import java.util.concurrent.TimeUnit; +import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.startsWith; /** * Base class of ML integration tests that use a native data_frame_analytics process @@ -46,6 +57,8 @@ abstract class MlNativeDataFrameAnalyticsIntegTestCase extends MlNativeIntegTest } private void cleanUpAnalytics() { + stopAnalyticsAndForceStopOnError(); + for (DataFrameAnalyticsConfig config : analytics) { try { assertThat(deleteAnalytics(config.getId()).isAcknowledged(), is(true)); @@ -56,6 +69,20 @@ abstract class MlNativeDataFrameAnalyticsIntegTestCase extends MlNativeIntegTest } } + private void stopAnalyticsAndForceStopOnError() { + try { + assertThat(stopAnalytics("*").isStopped(), is(true)); + } catch (Exception e) { + logger.error("Failed to stop data frame analytics jobs; trying force", e); + try { + assertThat(forceStopAnalytics("*").isStopped(), is(true)); + } catch (Exception e2) { + logger.error("Force-stopping data frame analytics jobs failed", e2); + } + throw new RuntimeException("Had to resort to force-stopping jobs, something went wrong?", e); + } + } + protected void registerAnalytics(DataFrameAnalyticsConfig config) { if (analytics.add(config) == false) { throw new IllegalArgumentException("analytics config [" + config.getId() + "] is already registered"); @@ -82,6 +109,12 @@ abstract class MlNativeDataFrameAnalyticsIntegTestCase extends MlNativeIntegTest return client().execute(StopDataFrameAnalyticsAction.INSTANCE, request).actionGet(); } + protected StopDataFrameAnalyticsAction.Response forceStopAnalytics(String id) { + StopDataFrameAnalyticsAction.Request request = new StopDataFrameAnalyticsAction.Request(id); + request.setForce(true); + return client().execute(StopDataFrameAnalyticsAction.INSTANCE, request).actionGet(); + } + protected void waitUntilAnalyticsIsStopped(String id) throws Exception { waitUntilAnalyticsIsStopped(id, TimeValue.timeValueSeconds(30)); } @@ -151,4 +184,43 @@ abstract class MlNativeDataFrameAnalyticsIntegTestCase extends MlNativeIntegTest configBuilder.setAnalysis(regression); return configBuilder.build(); } + + /** + * Asserts whether the audit messages fetched from index match provided prefixes. + * More specifically, in order to pass: + * 1. the number of fetched messages must equal the number of provided prefixes + * AND + * 2. 
each fetched message must start with the corresponding prefix + */ + protected static void assertThatAuditMessagesMatch(String configId, String... expectedAuditMessagePrefixes) throws Exception { + // Make sure we wrote to the audit + // Since calls to write the AbstractAuditor are sent and forgot (async) we could have returned from the start, + // finished the job (as this is a very short analytics job), all without the audit being fully written. + assertBusy(() -> assertTrue(indexExists(AuditorField.NOTIFICATIONS_INDEX))); + assertBusy(() -> { + String[] actualAuditMessages = fetchAllAuditMessages(configId); + assertThat(actualAuditMessages.length, equalTo(expectedAuditMessagePrefixes.length)); + for (int i = 0; i < actualAuditMessages.length; i++) { + assertThat(actualAuditMessages[i], startsWith(expectedAuditMessagePrefixes[i])); + } + }); + } + + @SuppressWarnings("unchecked") + private static String[] fetchAllAuditMessages(String dataFrameAnalyticsId) throws Exception { + RefreshRequest refreshRequest = new RefreshRequest(AuditorField.NOTIFICATIONS_INDEX); + RefreshResponse refreshResponse = client().execute(RefreshAction.INSTANCE, refreshRequest).actionGet(); + assertThat(refreshResponse.getStatus().getStatus(), anyOf(equalTo(200), equalTo(201))); + + SearchRequest searchRequest = new SearchRequestBuilder(client(), SearchAction.INSTANCE) + .setIndices(AuditorField.NOTIFICATIONS_INDEX) + .addSort("timestamp", SortOrder.ASC) + .setQuery(QueryBuilders.termQuery("job_id", dataFrameAnalyticsId)) + .request(); + SearchResponse searchResponse = client().execute(SearchAction.INSTANCE, searchRequest).actionGet(); + + return Arrays.stream(searchResponse.getHits().getHits()) + .map(hit -> (String) hit.getSourceAsMap().get("message")) + .toArray(String[]::new); + } } diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/RegressionIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/RegressionIT.java index d6bc2aeaee1..4a9f682d2c2 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/RegressionIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/RegressionIT.java @@ -11,10 +11,12 @@ import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.search.SearchHit; import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsConfig; import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsState; import org.elasticsearch.xpack.core.ml.dataframe.analyses.Regression; +import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; import org.junit.After; import java.util.Arrays; @@ -102,6 +104,14 @@ public class RegressionIT extends MlNativeDataFrameAnalyticsIntegTestCase { assertProgress(jobId, 100, 100, 100, 100); assertThat(searchStoredProgress(jobId).getHits().getTotalHits().value, equalTo(1L)); + assertThatAuditMessagesMatch(jobId, + "Created analytics with analysis type [regression]", + "Estimated memory usage for this analytics to be", + "Started analytics", + "Creating destination index [regression_single_numeric_feature_and_mixed_data_set_source_index_results]", + "Finished reindexing to destination index 
[regression_single_numeric_feature_and_mixed_data_set_source_index_results]", + "Finished analysis"); + assertModelStatePersisted(jobId); } public void testWithOnlyTrainingRowsAndTrainingPercentIsHundred() throws Exception { @@ -161,6 +171,14 @@ public class RegressionIT extends MlNativeDataFrameAnalyticsIntegTestCase { assertProgress(jobId, 100, 100, 100, 100); assertThat(searchStoredProgress(jobId).getHits().getTotalHits().value, equalTo(1L)); + assertThatAuditMessagesMatch(jobId, + "Created analytics with analysis type [regression]", + "Estimated memory usage for this analytics to be", + "Started analytics", + "Creating destination index [regression_only_training_data_and_training_percent_is_hundred_source_index_results]", + "Finished reindexing to destination index [regression_only_training_data_and_training_percent_is_hundred_source_index_results]", + "Finished analysis"); + assertModelStatePersisted(jobId); } public void testWithOnlyTrainingRowsAndTrainingPercentIsFifty() throws Exception { @@ -230,5 +248,21 @@ public class RegressionIT extends MlNativeDataFrameAnalyticsIntegTestCase { assertProgress(jobId, 100, 100, 100, 100); assertThat(searchStoredProgress(jobId).getHits().getTotalHits().value, equalTo(1L)); + assertThatAuditMessagesMatch(jobId, + "Created analytics with analysis type [regression]", + "Estimated memory usage for this analytics to be", + "Started analytics", + "Creating destination index [regression_only_training_data_and_training_percent_is_fifty_source_index_results]", + "Finished reindexing to destination index [regression_only_training_data_and_training_percent_is_fifty_source_index_results]", + "Finished analysis"); + assertModelStatePersisted(jobId); + } + + private void assertModelStatePersisted(String jobId) { + String docId = jobId + "_regression_state#1"; + SearchResponse searchResponse = client().prepareSearch(AnomalyDetectorsIndex.jobStateIndexPattern()) + .setQuery(QueryBuilders.idsQuery().addIds(docId)) + .get(); + assertThat(searchResponse.getHits().getHits().length, equalTo(1)); } } diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/RunDataFrameAnalyticsIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/RunDataFrameAnalyticsIT.java index 88cc74a48b0..83a1839654b 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/RunDataFrameAnalyticsIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/RunDataFrameAnalyticsIT.java @@ -115,6 +115,13 @@ public class RunDataFrameAnalyticsIT extends MlNativeDataFrameAnalyticsIntegTest assertProgress(id, 100, 100, 100, 100); assertThat(searchStoredProgress(id).getHits().getTotalHits().value, equalTo(1L)); + assertThatAuditMessagesMatch(id, + "Created analytics with analysis type [outlier_detection]", + "Estimated memory usage for this analytics to be", + "Started analytics", + "Creating destination index [test-outlier-detection-with-few-docs-results]", + "Finished reindexing to destination index [test-outlier-detection-with-few-docs-results]", + "Finished analysis"); } public void testOutlierDetectionWithEnoughDocumentsToScroll() throws Exception { @@ -162,6 +169,13 @@ public class RunDataFrameAnalyticsIT extends MlNativeDataFrameAnalyticsIntegTest assertProgress(id, 100, 100, 100, 100); assertThat(searchStoredProgress(id).getHits().getTotalHits().value, equalTo(1L)); + 
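The assertThatAuditMessagesMatch calls added throughout these tests verify the audit trail written by the fire-and-forget auditor: the helper refreshes the notifications index, fetches the job's messages sorted by timestamp, and then requires the counts to match and each message to start with its expected prefix, in order. Stripped of the Elasticsearch test plumbing, that ordered prefix contract is just the following standalone sketch (not the helper itself):

// Illustrative sketch only: the ordered prefix-matching rule that assertThatAuditMessagesMatch enforces.
static boolean auditMessagesMatchPrefixes(String[] actualMessages, String... expectedPrefixes) {
    if (actualMessages.length != expectedPrefixes.length) {
        return false; // rule 1: one fetched message per expected prefix
    }
    for (int i = 0; i < actualMessages.length; i++) {
        if (actualMessages[i].startsWith(expectedPrefixes[i]) == false) {
            return false; // rule 2: messages must appear in the expected order and start with their prefix
        }
    }
    return true;
}

Prefix matching is used because some messages carry run-specific detail after the fixed prefix, for example the figure in "Estimated memory usage for this analytics to be".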
assertThatAuditMessagesMatch(id, + "Created analytics with analysis type [outlier_detection]", + "Estimated memory usage for this analytics to be", + "Started analytics", + "Creating destination index [test-outlier-detection-with-enough-docs-to-scroll-results]", + "Finished reindexing to destination index [test-outlier-detection-with-enough-docs-to-scroll-results]", + "Finished analysis"); } public void testOutlierDetectionWithMoreFieldsThanDocValueFieldLimit() throws Exception { @@ -234,9 +248,16 @@ public class RunDataFrameAnalyticsIT extends MlNativeDataFrameAnalyticsIntegTest assertProgress(id, 100, 100, 100, 100); assertThat(searchStoredProgress(id).getHits().getTotalHits().value, equalTo(1L)); + assertThatAuditMessagesMatch(id, + "Created analytics with analysis type [outlier_detection]", + "Estimated memory usage for this analytics to be", + "Started analytics", + "Creating destination index [test-outlier-detection-with-more-fields-than-docvalue-limit-results]", + "Finished reindexing to destination index [test-outlier-detection-with-more-fields-than-docvalue-limit-results]", + "Finished analysis"); } - public void testStopOutlierDetectionWithEnoughDocumentsToScroll() { + public void testStopOutlierDetectionWithEnoughDocumentsToScroll() throws Exception { String sourceIndex = "test-stop-outlier-detection-with-enough-docs-to-scroll"; client().admin().indices().prepareCreate(sourceIndex) @@ -265,7 +286,7 @@ public class RunDataFrameAnalyticsIT extends MlNativeDataFrameAnalyticsIntegTest assertState(id, DataFrameAnalyticsState.STOPPED); startAnalytics(id); - assertState(id, DataFrameAnalyticsState.STARTED); + // State here could be any of STARTED, REINDEXING or ANALYZING assertThat(stopAnalytics(id).isStopped(), is(true)); assertState(id, DataFrameAnalyticsState.STOPPED); @@ -284,6 +305,13 @@ public class RunDataFrameAnalyticsIT extends MlNativeDataFrameAnalyticsIntegTest } else { logger.debug("We stopped during reindexing: [{}] < [{}]", searchResponse.getHits().getTotalHits().value, docCount); } + + assertThatAuditMessagesMatch(id, + "Created analytics with analysis type [outlier_detection]", + "Estimated memory usage for this analytics to be", + "Started analytics", + "Creating destination index [test-stop-outlier-detection-with-enough-docs-to-scroll-results]", + "Stopped analytics"); } public void testOutlierDetectionWithMultipleSourceIndices() throws Exception { @@ -338,6 +366,13 @@ public class RunDataFrameAnalyticsIT extends MlNativeDataFrameAnalyticsIntegTest assertProgress(id, 100, 100, 100, 100); assertThat(searchStoredProgress(id).getHits().getTotalHits().value, equalTo(1L)); + assertThatAuditMessagesMatch(id, + "Created analytics with analysis type [outlier_detection]", + "Estimated memory usage for this analytics to be", + "Started analytics", + "Creating destination index [test-outlier-detection-with-multiple-source-indices-results]", + "Finished reindexing to destination index [test-outlier-detection-with-multiple-source-indices-results]", + "Finished analysis"); } public void testOutlierDetectionWithPreExistingDestIndex() throws Exception { @@ -388,9 +423,16 @@ public class RunDataFrameAnalyticsIT extends MlNativeDataFrameAnalyticsIntegTest assertProgress(id, 100, 100, 100, 100); assertThat(searchStoredProgress(id).getHits().getTotalHits().value, equalTo(1L)); + assertThatAuditMessagesMatch(id, + "Created analytics with analysis type [outlier_detection]", + "Estimated memory usage for this analytics to be", + "Started analytics", + "Using existing destination index 
[test-outlier-detection-with-pre-existing-dest-index-results]", + "Finished reindexing to destination index [test-outlier-detection-with-pre-existing-dest-index-results]", + "Finished analysis"); } - public void testModelMemoryLimitLowerThanEstimatedMemoryUsage() { + public void testModelMemoryLimitLowerThanEstimatedMemoryUsage() throws Exception { String sourceIndex = "test-model-memory-limit"; client().admin().indices().prepareCreate(sourceIndex) @@ -429,5 +471,9 @@ public class RunDataFrameAnalyticsIT extends MlNativeDataFrameAnalyticsIntegTest exception.getMessage(), startsWith("Cannot start because the configured model memory limit [" + modelMemoryLimit + "] is lower than the expected memory usage")); + + assertThatAuditMessagesMatch(id, + "Created analytics with analysis type [outlier_detection]", + "Estimated memory usage for this analytics to be"); } } diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ScheduledEventsIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ScheduledEventsIT.java index 2086adb869f..fd728f39545 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ScheduledEventsIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ScheduledEventsIT.java @@ -21,6 +21,7 @@ import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.config.JobUpdate; import org.elasticsearch.xpack.core.ml.job.results.AnomalyRecord; import org.elasticsearch.xpack.core.ml.job.results.Bucket; +import org.elasticsearch.xpack.core.ml.notifications.AuditorField; import org.junit.After; import java.io.IOException; @@ -223,7 +224,8 @@ public class ScheduledEventsIT extends MlNativeAutodetectIntegTestCase { // Wait until the notification that the process was updated is indexed assertBusy(() -> { - SearchResponse searchResponse = client().prepareSearch(".ml-notifications") + SearchResponse searchResponse = + client().prepareSearch(AuditorField.NOTIFICATIONS_INDEX) .setSize(1) .addSort("timestamp", SortOrder.DESC) .setQuery(QueryBuilders.boolQuery() @@ -298,7 +300,8 @@ public class ScheduledEventsIT extends MlNativeAutodetectIntegTestCase { // Wait until the notification that the job was updated is indexed assertBusy(() -> { - SearchResponse searchResponse = client().prepareSearch(".ml-notifications") + SearchResponse searchResponse = + client().prepareSearch(AuditorField.NOTIFICATIONS_INDEX) .setSize(1) .addSort("timestamp", SortOrder.DESC) .setQuery(QueryBuilders.boolQuery() diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java index fe245fc4552..bd43879792a 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java @@ -216,6 +216,7 @@ import org.elasticsearch.xpack.ml.job.process.normalizer.NativeNormalizerProcess import org.elasticsearch.xpack.ml.job.process.normalizer.NormalizerFactory; import org.elasticsearch.xpack.ml.job.process.normalizer.NormalizerProcessFactory; import org.elasticsearch.xpack.ml.notifications.AnomalyDetectionAuditor; +import org.elasticsearch.xpack.ml.notifications.DataFrameAnalyticsAuditor; import org.elasticsearch.xpack.ml.process.MlMemoryTracker; import 
org.elasticsearch.xpack.ml.process.NativeController; import org.elasticsearch.xpack.ml.process.NativeControllerHolder; @@ -367,6 +368,7 @@ public class MachineLearning extends Plugin implements ActionPlugin, AnalysisPlu private final SetOnce autodetectProcessManager = new SetOnce<>(); private final SetOnce datafeedManager = new SetOnce<>(); private final SetOnce dataFrameAnalyticsManager = new SetOnce<>(); + private final SetOnce dataFrameAnalyticsAuditor = new SetOnce<>(); private final SetOnce memoryTracker = new SetOnce<>(); public MachineLearning(Settings settings, Path configPath) { @@ -470,6 +472,8 @@ public class MachineLearning extends Plugin implements ActionPlugin, AnalysisPlu } AnomalyDetectionAuditor anomalyDetectionAuditor = new AnomalyDetectionAuditor(client, clusterService.getNodeName()); + DataFrameAnalyticsAuditor dataFrameAnalyticsAuditor = new DataFrameAnalyticsAuditor(client, clusterService.getNodeName()); + this.dataFrameAnalyticsAuditor.set(dataFrameAnalyticsAuditor); JobResultsProvider jobResultsProvider = new JobResultsProvider(client, settings); JobResultsPersister jobResultsPersister = new JobResultsPersister(client); JobDataCountsPersister jobDataCountsPersister = new JobDataCountsPersister(client); @@ -510,7 +514,7 @@ public class MachineLearning extends Plugin implements ActionPlugin, AnalysisPlu client, clusterService); normalizerProcessFactory = new NativeNormalizerProcessFactory(environment, nativeController, clusterService); - analyticsProcessFactory = new NativeAnalyticsProcessFactory(environment, nativeController, clusterService); + analyticsProcessFactory = new NativeAnalyticsProcessFactory(environment, client, nativeController, clusterService); memoryEstimationProcessFactory = new NativeMemoryUsageEstimationProcessFactory(environment, nativeController, clusterService); } catch (IOException e) { @@ -558,10 +562,10 @@ public class MachineLearning extends Plugin implements ActionPlugin, AnalysisPlu MemoryUsageEstimationProcessManager memoryEstimationProcessManager = new MemoryUsageEstimationProcessManager( threadPool.generic(), threadPool.executor(MachineLearning.JOB_COMMS_THREAD_POOL_NAME), memoryEstimationProcessFactory); - DataFrameAnalyticsConfigProvider dataFrameAnalyticsConfigProvider = new DataFrameAnalyticsConfigProvider(client); + DataFrameAnalyticsConfigProvider dataFrameAnalyticsConfigProvider = new DataFrameAnalyticsConfigProvider(client, xContentRegistry); assert client instanceof NodeClient; - DataFrameAnalyticsManager dataFrameAnalyticsManager = new DataFrameAnalyticsManager((NodeClient) client, - dataFrameAnalyticsConfigProvider, analyticsProcessManager); + DataFrameAnalyticsManager dataFrameAnalyticsManager = new DataFrameAnalyticsManager( + (NodeClient) client, dataFrameAnalyticsConfigProvider, analyticsProcessManager, dataFrameAnalyticsAuditor); this.dataFrameAnalyticsManager.set(dataFrameAnalyticsManager); // Components shared by anomaly detection and data frame analytics @@ -592,6 +596,7 @@ public class MachineLearning extends Plugin implements ActionPlugin, AnalysisPlu jobDataCountsPersister, datafeedManager, anomalyDetectionAuditor, + dataFrameAnalyticsAuditor, new MlAssignmentNotifier(settings, anomalyDetectionAuditor, threadPool, client, clusterService), memoryTracker, analyticsProcessManager, @@ -614,7 +619,7 @@ public class MachineLearning extends Plugin implements ActionPlugin, AnalysisPlu memoryTracker.get(), client), new TransportStartDatafeedAction.StartDatafeedPersistentTasksExecutor(datafeedManager.get()), new 
TransportStartDataFrameAnalyticsAction.TaskExecutor(settings, client, clusterService, dataFrameAnalyticsManager.get(), - memoryTracker.get()) + dataFrameAnalyticsAuditor.get(), memoryTracker.get()) ); } @@ -906,8 +911,12 @@ public class MachineLearning extends Plugin implements ActionPlugin, AnalysisPlu public static boolean allTemplatesInstalled(ClusterState clusterState) { boolean allPresent = true; - List templateNames = Arrays.asList(AuditorField.NOTIFICATIONS_INDEX, MlMetaIndex.INDEX_NAME, - AnomalyDetectorsIndexFields.STATE_INDEX_PREFIX, AnomalyDetectorsIndex.jobResultsIndexPrefix()); + List templateNames = + Arrays.asList( + AuditorField.NOTIFICATIONS_INDEX, + MlMetaIndex.INDEX_NAME, + AnomalyDetectorsIndexFields.STATE_INDEX_PREFIX, + AnomalyDetectorsIndex.jobResultsIndexPrefix()); for (String templateName : templateNames) { allPresent = allPresent && TemplateUtils.checkTemplateExistsAndVersionIsGTECurrentVersion(templateName, clusterState); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteDataFrameAnalyticsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteDataFrameAnalyticsAction.java index 052422e3cf3..82152de8f69 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteDataFrameAnalyticsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteDataFrameAnalyticsAction.java @@ -40,14 +40,17 @@ import org.elasticsearch.xpack.core.ml.MlTasks; import org.elasticsearch.xpack.core.ml.action.DeleteDataFrameAnalyticsAction; import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsConfig; import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsState; +import org.elasticsearch.xpack.core.ml.job.messages.Messages; import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import org.elasticsearch.xpack.ml.dataframe.DataFrameAnalyticsTask; import org.elasticsearch.xpack.ml.dataframe.persistence.DataFrameAnalyticsConfigProvider; +import org.elasticsearch.xpack.ml.notifications.DataFrameAnalyticsAuditor; import org.elasticsearch.xpack.ml.process.MlMemoryTracker; import org.elasticsearch.xpack.ml.utils.MlIndicesUtils; import java.io.IOException; +import java.util.Objects; import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; @@ -65,17 +68,20 @@ public class TransportDeleteDataFrameAnalyticsAction private final Client client; private final MlMemoryTracker memoryTracker; private final DataFrameAnalyticsConfigProvider configProvider; + private final DataFrameAnalyticsAuditor auditor; @Inject public TransportDeleteDataFrameAnalyticsAction(TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, Client client, - MlMemoryTracker memoryTracker, DataFrameAnalyticsConfigProvider configProvider) { + MlMemoryTracker memoryTracker, DataFrameAnalyticsConfigProvider configProvider, + DataFrameAnalyticsAuditor auditor) { super(DeleteDataFrameAnalyticsAction.NAME, transportService, clusterService, threadPool, actionFilters, DeleteDataFrameAnalyticsAction.Request::new, indexNameExpressionResolver); this.client = client; this.memoryTracker = memoryTracker; this.configProvider = configProvider; + this.auditor = 
Objects.requireNonNull(auditor); } @Override @@ -112,7 +118,7 @@ public class TransportDeleteDataFrameAnalyticsAction // We clean up the memory tracker on delete because there is no stop; the task stops by itself memoryTracker.removeDataFrameAnalyticsJob(id); - // Step 2. Delete the config + // Step 3. Delete the config ActionListener deleteStateHandler = ActionListener.wrap( bulkByScrollResponse -> { if (bulkByScrollResponse.isTimedOut()) { @@ -130,7 +136,7 @@ public class TransportDeleteDataFrameAnalyticsAction listener::onFailure ); - // Step 1. Delete state + // Step 2. Delete state ActionListener configListener = ActionListener.wrap( config -> deleteState(parentTaskClient, id, deleteStateHandler), listener::onFailure @@ -152,6 +158,7 @@ public class TransportDeleteDataFrameAnalyticsAction } assert deleteResponse.getResult() == DocWriteResponse.Result.DELETED; LOGGER.info("[{}] Deleted", id); + auditor.info(id, Messages.DATA_FRAME_ANALYTICS_AUDIT_DELETED); listener.onResponse(new AcknowledgedResponse(true)); }, listener::onFailure diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutDataFrameAnalyticsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutDataFrameAnalyticsAction.java index c70093bd885..03bb8d44172 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutDataFrameAnalyticsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutDataFrameAnalyticsAction.java @@ -46,6 +46,7 @@ import org.elasticsearch.xpack.core.security.authz.permission.ResourcePrivileges import org.elasticsearch.xpack.core.security.support.Exceptions; import org.elasticsearch.xpack.ml.dataframe.SourceDestValidator; import org.elasticsearch.xpack.ml.dataframe.persistence.DataFrameAnalyticsConfigProvider; +import org.elasticsearch.xpack.ml.notifications.DataFrameAnalyticsAuditor; import java.io.IOException; import java.time.Instant; @@ -64,6 +65,7 @@ public class TransportPutDataFrameAnalyticsAction private final Client client; private final ClusterService clusterService; private final IndexNameExpressionResolver indexNameExpressionResolver; + private final DataFrameAnalyticsAuditor auditor; private volatile ByteSizeValue maxModelMemoryLimit; @@ -71,7 +73,7 @@ public class TransportPutDataFrameAnalyticsAction public TransportPutDataFrameAnalyticsAction(Settings settings, TransportService transportService, ActionFilters actionFilters, XPackLicenseState licenseState, Client client, ThreadPool threadPool, ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver, - DataFrameAnalyticsConfigProvider configProvider) { + DataFrameAnalyticsConfigProvider configProvider, DataFrameAnalyticsAuditor auditor) { super(PutDataFrameAnalyticsAction.NAME, transportService, actionFilters, PutDataFrameAnalyticsAction.Request::new); this.licenseState = licenseState; this.configProvider = configProvider; @@ -81,6 +83,7 @@ public class TransportPutDataFrameAnalyticsAction this.client = client; this.clusterService = clusterService; this.indexNameExpressionResolver = Objects.requireNonNull(indexNameExpressionResolver); + this.auditor = Objects.requireNonNull(auditor); maxModelMemoryLimit = MachineLearningField.MAX_MODEL_MEMORY_LIMIT.get(settings); clusterService.getClusterSettings() @@ -179,7 +182,14 @@ public class TransportPutDataFrameAnalyticsAction client, clusterState, ActionListener.wrap( - unused -> configProvider.put(config, headers, listener), + 
unused -> configProvider.put(config, headers, ActionListener.wrap( + indexResponse -> { + auditor.info( + config.getId(), + Messages.getMessage(Messages.DATA_FRAME_ANALYTICS_AUDIT_CREATED, config.getAnalysis().getWriteableName())); + listener.onResponse(indexResponse); + }, + listener::onFailure)), listener::onFailure)); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDataFrameAnalyticsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDataFrameAnalyticsAction.java index 27840e1cd94..fe28a0c639c 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDataFrameAnalyticsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDataFrameAnalyticsAction.java @@ -53,6 +53,7 @@ import org.elasticsearch.xpack.core.ml.action.StartDataFrameAnalyticsAction; import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsConfig; import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsState; import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsTaskState; +import org.elasticsearch.xpack.core.ml.job.messages.Messages; import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import org.elasticsearch.xpack.ml.MachineLearning; @@ -63,6 +64,7 @@ import org.elasticsearch.xpack.ml.dataframe.SourceDestValidator; import org.elasticsearch.xpack.ml.dataframe.extractor.DataFrameDataExtractorFactory; import org.elasticsearch.xpack.ml.dataframe.persistence.DataFrameAnalyticsConfigProvider; import org.elasticsearch.xpack.ml.job.JobNodeSelector; +import org.elasticsearch.xpack.ml.notifications.DataFrameAnalyticsAuditor; import org.elasticsearch.xpack.ml.process.MlMemoryTracker; import java.io.IOException; @@ -91,13 +93,15 @@ public class TransportStartDataFrameAnalyticsAction private final PersistentTasksService persistentTasksService; private final DataFrameAnalyticsConfigProvider configProvider; private final MlMemoryTracker memoryTracker; + private final DataFrameAnalyticsAuditor auditor; @Inject public TransportStartDataFrameAnalyticsAction(TransportService transportService, Client client, ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters, XPackLicenseState licenseState, IndexNameExpressionResolver indexNameExpressionResolver, PersistentTasksService persistentTasksService, - DataFrameAnalyticsConfigProvider configProvider, MlMemoryTracker memoryTracker) { + DataFrameAnalyticsConfigProvider configProvider, MlMemoryTracker memoryTracker, + DataFrameAnalyticsAuditor auditor) { super(StartDataFrameAnalyticsAction.NAME, transportService, clusterService, threadPool, actionFilters, StartDataFrameAnalyticsAction.Request::new, indexNameExpressionResolver); this.licenseState = licenseState; @@ -105,6 +109,7 @@ public class TransportStartDataFrameAnalyticsAction this.persistentTasksService = persistentTasksService; this.configProvider = configProvider; this.memoryTracker = memoryTracker; + this.auditor = Objects.requireNonNull(auditor); } @Override @@ -146,8 +151,8 @@ public class TransportStartDataFrameAnalyticsAction @Override public void onFailure(Exception e) { if (e instanceof ResourceAlreadyExistsException) { - e = new ElasticsearchStatusException("Cannot open data frame analytics [" + request.getId() + - "] because it has already been opened", RestStatus.CONFLICT, e); + e = new ElasticsearchStatusException("Cannot 
start data frame analytics [" + request.getId() + + "] because it has already been started", RestStatus.CONFLICT, e); } listener.onFailure(e); } @@ -169,6 +174,11 @@ public class TransportStartDataFrameAnalyticsAction // Tell the job tracker to refresh the memory requirement for this job and all other jobs that have persistent tasks ActionListener estimateMemoryUsageListener = ActionListener.wrap( estimateMemoryUsageResponse -> { + auditor.info( + request.getId(), + Messages.getMessage( + Messages.DATA_FRAME_ANALYTICS_AUDIT_ESTIMATED_MEMORY_USAGE, + estimateMemoryUsageResponse.getExpectedMemoryWithoutDisk())); // Validate that model memory limit is sufficient to run the analysis if (configHolder.get().getModelMemoryLimit() .compareTo(estimateMemoryUsageResponse.getExpectedMemoryWithoutDisk()) < 0) { @@ -302,6 +312,7 @@ public class TransportStartDataFrameAnalyticsAction // what would have happened if the error had been detected in the "fast fail" validation cancelAnalyticsStart(task, predicate.exception, listener); } else { + auditor.info(task.getParams().getId(), Messages.DATA_FRAME_ANALYTICS_AUDIT_STARTED); listener.onResponse(new AcknowledgedResponse(true)); } } @@ -313,8 +324,8 @@ public class TransportStartDataFrameAnalyticsAction @Override public void onTimeout(TimeValue timeout) { - listener.onFailure(new ElasticsearchException("Starting data frame analytics [" + task.getParams().getId() - + "] timed out after [" + timeout + "]")); + listener.onFailure(new ElasticsearchException( + "Starting data frame analytics [" + task.getParams().getId() + "] timed out after [" + timeout + "]")); } }); } @@ -323,7 +334,7 @@ public class TransportStartDataFrameAnalyticsAction * Important: the methods of this class must NOT throw exceptions. If they did then the callers * of endpoints waiting for a condition tested by this predicate would never get a response. 
*/ - private class AnalyticsPredicate implements Predicate> { + private static class AnalyticsPredicate implements Predicate> { private volatile Exception exception; @@ -407,18 +418,21 @@ public class TransportStartDataFrameAnalyticsAction private final Client client; private final ClusterService clusterService; private final DataFrameAnalyticsManager manager; + private final DataFrameAnalyticsAuditor auditor; private final MlMemoryTracker memoryTracker; private volatile int maxMachineMemoryPercent; private volatile int maxLazyMLNodes; private volatile int maxOpenJobs; + private volatile ClusterState clusterState; public TaskExecutor(Settings settings, Client client, ClusterService clusterService, DataFrameAnalyticsManager manager, - MlMemoryTracker memoryTracker) { + DataFrameAnalyticsAuditor auditor, MlMemoryTracker memoryTracker) { super(MlTasks.DATA_FRAME_ANALYTICS_TASK_NAME, MachineLearning.UTILITY_THREAD_POOL_NAME); this.client = Objects.requireNonNull(client); this.clusterService = Objects.requireNonNull(clusterService); this.manager = Objects.requireNonNull(manager); + this.auditor = Objects.requireNonNull(auditor); this.memoryTracker = Objects.requireNonNull(memoryTracker); this.maxMachineMemoryPercent = MachineLearning.MAX_MACHINE_MEMORY_PERCENT.get(settings); this.maxLazyMLNodes = MachineLearning.MAX_LAZY_ML_NODES.get(settings); @@ -427,6 +441,7 @@ public class TransportStartDataFrameAnalyticsAction .addSettingsUpdateConsumer(MachineLearning.MAX_MACHINE_MEMORY_PERCENT, this::setMaxMachineMemoryPercent); clusterService.getClusterSettings().addSettingsUpdateConsumer(MachineLearning.MAX_LAZY_ML_NODES, this::setMaxLazyMLNodes); clusterService.getClusterSettings().addSettingsUpdateConsumer(MAX_OPEN_JOBS_PER_NODE, this::setMaxOpenJobs); + clusterService.addListener(event -> clusterState = event.state()); } @Override @@ -434,8 +449,8 @@ public class TransportStartDataFrameAnalyticsAction long id, String type, String action, TaskId parentTaskId, PersistentTasksCustomMetaData.PersistentTask persistentTask, Map headers) { - return new DataFrameAnalyticsTask(id, type, action, parentTaskId, headers, client, clusterService, manager, - persistentTask.getParams()); + return new DataFrameAnalyticsTask( + id, type, action, parentTaskId, headers, client, clusterService, manager, auditor, persistentTask.getParams()); } @Override @@ -493,10 +508,10 @@ public class TransportStartDataFrameAnalyticsAction DataFrameAnalyticsTaskState startedState = new DataFrameAnalyticsTaskState(DataFrameAnalyticsState.STARTED, task.getAllocationId(), null); task.updatePersistentTaskState(startedState, ActionListener.wrap( - response -> manager.execute((DataFrameAnalyticsTask) task, DataFrameAnalyticsState.STARTED), + response -> manager.execute((DataFrameAnalyticsTask) task, DataFrameAnalyticsState.STARTED, clusterState), task::markAsFailed)); } else { - manager.execute((DataFrameAnalyticsTask)task, analyticsTaskState.getState()); + manager.execute((DataFrameAnalyticsTask) task, analyticsTaskState.getState(), clusterState); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopDataFrameAnalyticsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopDataFrameAnalyticsAction.java index d6d67aeeddd..00522696b45 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopDataFrameAnalyticsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopDataFrameAnalyticsAction.java 
@@ -32,16 +32,19 @@ import org.elasticsearch.xpack.core.ml.action.StopDataFrameAnalyticsAction; import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsConfig; import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsState; import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsTaskState; +import org.elasticsearch.xpack.core.ml.job.messages.Messages; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import org.elasticsearch.xpack.ml.MachineLearning; import org.elasticsearch.xpack.ml.dataframe.DataFrameAnalyticsTask; import org.elasticsearch.xpack.ml.dataframe.persistence.DataFrameAnalyticsConfigProvider; +import org.elasticsearch.xpack.ml.notifications.DataFrameAnalyticsAuditor; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.HashSet; import java.util.List; +import java.util.Objects; import java.util.Set; import java.util.stream.Collectors; @@ -59,17 +62,20 @@ public class TransportStopDataFrameAnalyticsAction private final ThreadPool threadPool; private final PersistentTasksService persistentTasksService; private final DataFrameAnalyticsConfigProvider configProvider; + private final DataFrameAnalyticsAuditor auditor; @Inject public TransportStopDataFrameAnalyticsAction(TransportService transportService, ActionFilters actionFilters, ClusterService clusterService, ThreadPool threadPool, PersistentTasksService persistentTasksService, - DataFrameAnalyticsConfigProvider configProvider) { + DataFrameAnalyticsConfigProvider configProvider, + DataFrameAnalyticsAuditor auditor) { super(StopDataFrameAnalyticsAction.NAME, clusterService, transportService, actionFilters, StopDataFrameAnalyticsAction.Request::new, StopDataFrameAnalyticsAction.Response::new, StopDataFrameAnalyticsAction.Response::new, ThreadPool.Names.SAME); this.threadPool = threadPool; this.persistentTasksService = persistentTasksService; this.configProvider = configProvider; + this.auditor = Objects.requireNonNull(auditor); } @Override @@ -258,7 +264,10 @@ public class TransportStopDataFrameAnalyticsAction persistentTasksService.waitForPersistentTasksCondition(persistentTasks -> filterPersistentTasks(persistentTasks, analyticsIds).isEmpty(), request.getTimeout(), ActionListener.wrap( - booleanResponse -> listener.onResponse(response), + booleanResponse -> { + auditor.info(request.getId(), Messages.DATA_FRAME_ANALYTICS_AUDIT_STOPPED); + listener.onResponse(response); + }, listener::onFailure )); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJob.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJob.java index 85ed2f531f8..257a1947528 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJob.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJob.java @@ -97,6 +97,7 @@ class DatafeedJob { void isolate() { isIsolated = true; + timingStatsReporter.disallowPersisting(); } boolean isIsolated() { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManager.java index a60797562d0..457701740e0 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManager.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.ml.datafeed; import 
org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterChangedEvent; @@ -95,7 +96,12 @@ public class DatafeedManager { @Override public void onFailure(Exception e) { - finishHandler.accept(e); + if (e instanceof ResourceNotFoundException) { + // The task was stopped in the meantime, no need to do anything + logger.info("[{}] Aborting as datafeed has been stopped", datafeedId); + } else { + finishHandler.accept(e); + } } }); }, finishHandler::accept diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedTimingStatsReporter.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedTimingStatsReporter.java index 283b667f7b8..b11761541df 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedTimingStatsReporter.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedTimingStatsReporter.java @@ -33,12 +33,15 @@ public class DatafeedTimingStatsReporter { private volatile DatafeedTimingStats currentTimingStats; /** Object used to persist current timing stats. */ private final DatafeedTimingStatsPersister persister; + /** Whether or not timing stats will be persisted by the persister object. */ + private volatile boolean allowedPersisting; public DatafeedTimingStatsReporter(DatafeedTimingStats timingStats, DatafeedTimingStatsPersister persister) { Objects.requireNonNull(timingStats); this.persistedTimingStats = new DatafeedTimingStats(timingStats); this.currentTimingStats = new DatafeedTimingStats(timingStats); this.persister = Objects.requireNonNull(persister); + this.allowedPersisting = true; } /** Gets current timing stats. */ @@ -79,6 +82,11 @@ public class DatafeedTimingStatsReporter { } } + /** Disallows persisting timing stats. After this call finishes, no document will be persisted. 
*/ + public void disallowPersisting() { + allowedPersisting = false; + } + private void flushIfDifferSignificantly() { if (differSignificantly(currentTimingStats, persistedTimingStats)) { flush(WriteRequest.RefreshPolicy.NONE); @@ -87,7 +95,9 @@ public class DatafeedTimingStatsReporter { private void flush(WriteRequest.RefreshPolicy refreshPolicy) { persistedTimingStats = new DatafeedTimingStats(currentTimingStats); - persister.persistDatafeedTimingStats(persistedTimingStats, refreshPolicy); + if (allowedPersisting) { + persister.persistDatafeedTimingStats(persistedTimingStats, refreshPolicy); + } } /** diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsManager.java index c9e1604bf21..f887d8cff85 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsManager.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.ml.dataframe; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.admin.indices.delete.DeleteIndexAction; @@ -19,6 +20,7 @@ import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.support.ContextPreservingActionListener; import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.reindex.BulkByScrollResponse; @@ -30,10 +32,13 @@ import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsConfig; import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsState; import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsTaskState; +import org.elasticsearch.xpack.core.ml.job.messages.Messages; +import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import org.elasticsearch.xpack.ml.dataframe.extractor.DataFrameDataExtractorFactory; import org.elasticsearch.xpack.ml.dataframe.persistence.DataFrameAnalyticsConfigProvider; import org.elasticsearch.xpack.ml.dataframe.process.AnalyticsProcessManager; +import org.elasticsearch.xpack.ml.notifications.DataFrameAnalyticsAuditor; import java.time.Clock; import java.util.Objects; @@ -51,15 +56,17 @@ public class DataFrameAnalyticsManager { private final NodeClient client; private final DataFrameAnalyticsConfigProvider configProvider; private final AnalyticsProcessManager processManager; + private final DataFrameAnalyticsAuditor auditor; public DataFrameAnalyticsManager(NodeClient client, DataFrameAnalyticsConfigProvider configProvider, - AnalyticsProcessManager processManager) { + AnalyticsProcessManager processManager, DataFrameAnalyticsAuditor auditor) { this.client = Objects.requireNonNull(client); this.configProvider = Objects.requireNonNull(configProvider); this.processManager = Objects.requireNonNull(processManager); + this.auditor = 
Objects.requireNonNull(auditor); } - public void execute(DataFrameAnalyticsTask task, DataFrameAnalyticsState currentState) { + public void execute(DataFrameAnalyticsTask task, DataFrameAnalyticsState currentState, ClusterState clusterState) { ActionListener reindexingStateListener = ActionListener.wrap( config -> reindexDataframeAndStartAnalysis(task, config), error -> task.updateState(DataFrameAnalyticsState.FAILED, error.getMessage()) @@ -76,7 +83,13 @@ public class DataFrameAnalyticsManager { case STARTED: task.updatePersistentTaskState(reindexingState, ActionListener.wrap( updatedTask -> reindexingStateListener.onResponse(config), - reindexingStateListener::onFailure)); + error -> { + if (error instanceof ResourceNotFoundException) { + // The task has been stopped + } else { + reindexingStateListener.onFailure(error); + } + })); break; // The task has fully reindexed the documents and we should continue on with our analyses case ANALYZING: @@ -112,7 +125,13 @@ public class DataFrameAnalyticsManager { ); // Retrieve configuration - configProvider.get(task.getParams().getId(), configListener); + ActionListener stateAliasListener = ActionListener.wrap( + aBoolean -> configProvider.get(task.getParams().getId(), configListener), + configListener::onFailure + ); + + // Make sure the state index and alias exist + AnomalyDetectorsIndex.createStateIndexAndAliasIfNecessary(client, clusterState, stateAliasListener); } private void reindexDataframeAndStartAnalysis(DataFrameAnalyticsTask task, DataFrameAnalyticsConfig config) { @@ -143,6 +162,9 @@ public class DataFrameAnalyticsManager { return; } task.setReindexingFinished(); + auditor.info( + config.getId(), + Messages.getMessage(Messages.DATA_FRAME_ANALYTICS_AUDIT_FINISHED_REINDEXING, config.getDest().getIndex())); ClientHelper.executeAsyncWithOrigin(client, ClientHelper.ML_ORIGIN, RefreshAction.INSTANCE, @@ -175,6 +197,9 @@ public class DataFrameAnalyticsManager { // Create destination index if it does not exist ActionListener destIndexListener = ActionListener.wrap( indexResponse -> { + auditor.info( + config.getId(), + Messages.getMessage(Messages.DATA_FRAME_ANALYTICS_AUDIT_REUSING_DEST_INDEX, indexResponse.indices()[0])); LOGGER.info("[{}] Using existing destination index [{}]", config.getId(), indexResponse.indices()[0]); DataFrameAnalyticsIndex.updateMappingsToDestIndex(client, config, indexResponse, ActionListener.wrap( acknowledgedResponse -> copyIndexCreatedListener.onResponse(null), @@ -183,6 +208,9 @@ public class DataFrameAnalyticsManager { }, e -> { if (org.elasticsearch.ExceptionsHelper.unwrapCause(e) instanceof IndexNotFoundException) { + auditor.info( + config.getId(), + Messages.getMessage(Messages.DATA_FRAME_ANALYTICS_AUDIT_CREATING_DEST_INDEX, config.getDest().getIndex())); LOGGER.info("[{}] Creating destination index [{}]", config.getId(), config.getDest().getIndex()); DataFrameAnalyticsIndex.createDestinationIndex(client, Clock.systemUTC(), config, copyIndexCreatedListener); } else { @@ -210,10 +238,17 @@ public class DataFrameAnalyticsManager { if (error != null) { task.updateState(DataFrameAnalyticsState.FAILED, error.getMessage()); } else { + auditor.info(config.getId(), Messages.DATA_FRAME_ANALYTICS_AUDIT_FINISHED_ANALYSIS); task.markAsCompleted(); } }), - error -> task.updateState(DataFrameAnalyticsState.FAILED, error.getMessage()) + error -> { + if (error instanceof ResourceNotFoundException) { + // Task has stopped + } else { + task.updateState(DataFrameAnalyticsState.FAILED, error.getMessage()); + } + } )); }, error 
-> task.updateState(DataFrameAnalyticsState.FAILED, error.getMessage()) diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsTask.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsTask.java index 1e3cbdf016a..2a172fd6d9c 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsTask.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsTask.java @@ -31,10 +31,12 @@ import org.elasticsearch.xpack.core.ml.action.GetDataFrameAnalyticsStatsAction; import org.elasticsearch.xpack.core.ml.action.StartDataFrameAnalyticsAction; import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsState; import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsTaskState; +import org.elasticsearch.xpack.core.ml.job.messages.Messages; import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import org.elasticsearch.xpack.core.ml.utils.PhaseProgress; import org.elasticsearch.xpack.core.watcher.watch.Payload; +import org.elasticsearch.xpack.ml.notifications.DataFrameAnalyticsAuditor; import java.util.Arrays; import java.util.List; @@ -52,6 +54,7 @@ public class DataFrameAnalyticsTask extends AllocatedPersistentTask implements S private final Client client; private final ClusterService clusterService; private final DataFrameAnalyticsManager analyticsManager; + private final DataFrameAnalyticsAuditor auditor; private final StartDataFrameAnalyticsAction.TaskParams taskParams; @Nullable private volatile Long reindexingTaskId; @@ -61,11 +64,12 @@ public class DataFrameAnalyticsTask extends AllocatedPersistentTask implements S public DataFrameAnalyticsTask(long id, String type, String action, TaskId parentTask, Map headers, Client client, ClusterService clusterService, DataFrameAnalyticsManager analyticsManager, - StartDataFrameAnalyticsAction.TaskParams taskParams) { + DataFrameAnalyticsAuditor auditor, StartDataFrameAnalyticsAction.TaskParams taskParams) { super(id, type, action, MlTasks.DATA_FRAME_ANALYTICS_TASK_ID_PREFIX + taskParams.getId(), parentTask, headers); this.client = Objects.requireNonNull(client); this.clusterService = Objects.requireNonNull(clusterService); this.analyticsManager = Objects.requireNonNull(analyticsManager); + this.auditor = Objects.requireNonNull(auditor); this.taskParams = Objects.requireNonNull(taskParams); } @@ -96,7 +100,12 @@ public class DataFrameAnalyticsTask extends AllocatedPersistentTask implements S @Override public void markAsCompleted() { - persistProgress(() -> super.markAsCompleted()); + // It is possible that the stop API has been called in the meantime and that + // may also cause this method to be called. We check whether we have already + // been marked completed to avoid doing it twice. 
+ if (isCompleted() == false) { + persistProgress(() -> super.markAsCompleted()); + } } @Override @@ -154,11 +163,17 @@ public class DataFrameAnalyticsTask extends AllocatedPersistentTask implements S public void updateState(DataFrameAnalyticsState state, @Nullable String reason) { DataFrameAnalyticsTaskState newTaskState = new DataFrameAnalyticsTaskState(state, getAllocationId(), reason); - updatePersistentTaskState(newTaskState, ActionListener.wrap( - updatedTask -> LOGGER.info("[{}] Successfully update task state to [{}]", getParams().getId(), state), - e -> LOGGER.error(new ParameterizedMessage("[{}] Could not update task state to [{}] with reason [{}]", - getParams().getId(), state, reason), e) - )); + updatePersistentTaskState( + newTaskState, + ActionListener.wrap( + updatedTask -> { + auditor.info(getParams().getId(), Messages.getMessage(Messages.DATA_FRAME_ANALYTICS_AUDIT_UPDATED_STATE, state)); + LOGGER.info("[{}] Successfully updated task state to [{}]", getParams().getId(), state); + }, + e -> LOGGER.error(new ParameterizedMessage("[{}] Could not update task state to [{}] with reason [{}]", + getParams().getId(), state, reason), e) + ) + ); } public void updateReindexTaskProgress(ActionListener listener) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/persistence/DataFrameAnalyticsConfigProvider.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/persistence/DataFrameAnalyticsConfigProvider.java index 569469452cf..d8d0cd775dd 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/persistence/DataFrameAnalyticsConfigProvider.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/persistence/DataFrameAnalyticsConfigProvider.java @@ -5,18 +5,31 @@ */ package org.elasticsearch.xpack.ml.dataframe.persistence; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.index.IndexAction; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.client.Client; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.engine.VersionConflictEngineException; +import org.elasticsearch.index.query.BoolQueryBuilder; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.search.SearchHit; import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.action.util.PageParams; import org.elasticsearch.xpack.core.ml.action.GetDataFrameAnalyticsAction; @@ -26,11 +39,15 @@ import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import org.elasticsearch.xpack.core.ml.utils.ToXContentParams; import 
java.io.IOException; +import java.io.InputStream; +import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Set; import java.util.stream.Collectors; import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; @@ -38,6 +55,8 @@ import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; public class DataFrameAnalyticsConfigProvider { + private static final Logger logger = LogManager.getLogger(DataFrameAnalyticsConfigProvider.class); + private static final int MAX_CONFIGS_SIZE = 10000; private static final Map TO_XCONTENT_PARAMS; @@ -50,9 +69,11 @@ public class DataFrameAnalyticsConfigProvider { } private final Client client; + private final NamedXContentRegistry xContentRegistry; - public DataFrameAnalyticsConfigProvider(Client client) { + public DataFrameAnalyticsConfigProvider(Client client, NamedXContentRegistry xContentRegistry) { this.client = Objects.requireNonNull(client); + this.xContentRegistry = xContentRegistry; } public void put(DataFrameAnalyticsConfig config, Map headers, ActionListener listener) { @@ -119,4 +140,54 @@ public class DataFrameAnalyticsConfigProvider { executeAsyncWithOrigin(client, ML_ORIGIN, GetDataFrameAnalyticsAction.INSTANCE, request, ActionListener.wrap( response -> listener.onResponse(response.getResources().results()), listener::onFailure)); } + + /** + * Unlike {@link #getMultiple(String, boolean, ActionListener)} this method tries to get the configs that match jobs with tasks. + * It expects concrete ids and it does not throw if there is no config for a given id. + */ + public void getConfigsForJobsWithTasksLeniently(Set jobsWithTask, ActionListener> listener) { + BoolQueryBuilder query = QueryBuilders.boolQuery(); + query.filter(QueryBuilders.termQuery(DataFrameAnalyticsConfig.CONFIG_TYPE.getPreferredName(), DataFrameAnalyticsConfig.TYPE)); + query.filter(QueryBuilders.termsQuery(DataFrameAnalyticsConfig.ID.getPreferredName(), jobsWithTask)); + + SearchRequest searchRequest = new SearchRequest(AnomalyDetectorsIndex.configIndexName()); + searchRequest.indicesOptions(IndicesOptions.lenientExpandOpen()); + searchRequest.source().size(DataFrameAnalyticsConfigProvider.MAX_CONFIGS_SIZE); + searchRequest.source().query(query); + + executeAsyncWithOrigin(client.threadPool().getThreadContext(), + ML_ORIGIN, + searchRequest, + new ActionListener() { + @Override + public void onResponse(SearchResponse searchResponse) { + SearchHit[] hits = searchResponse.getHits().getHits(); + List configs = new ArrayList<>(hits.length); + for (SearchHit hit : hits) { + BytesReference sourceBytes = hit.getSourceRef(); + try (InputStream stream = sourceBytes.streamInput(); + XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser( + xContentRegistry, LoggingDeprecationHandler.INSTANCE, stream)) { + configs.add(DataFrameAnalyticsConfig.LENIENT_PARSER.apply(parser, null).build()); + } catch (IOException e) { + listener.onFailure(e); + } + } + + + Set tasksWithoutConfigs = new HashSet<>(jobsWithTask); + tasksWithoutConfigs.removeAll(configs.stream().map(DataFrameAnalyticsConfig::getId).collect(Collectors.toList())); + if (tasksWithoutConfigs.isEmpty() == false) { + logger.warn("Data frame analytics tasks {} have no configs", tasksWithoutConfigs); + } + listener.onResponse(configs); + } + + @Override + public void onFailure(Exception e) { + listener.onFailure(e); + } + }, + client::search); + 
} } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/AnalyticsProcessConfig.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/AnalyticsProcessConfig.java index 5093404812a..ae05c61148f 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/AnalyticsProcessConfig.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/AnalyticsProcessConfig.java @@ -16,6 +16,7 @@ import java.util.Set; public class AnalyticsProcessConfig implements ToXContentObject { + private static final String JOB_ID = "job_id"; private static final String ROWS = "rows"; private static final String COLS = "cols"; private static final String MEMORY_LIMIT = "memory_limit"; @@ -24,6 +25,7 @@ public class AnalyticsProcessConfig implements ToXContentObject { private static final String RESULTS_FIELD = "results_field"; private static final String CATEGORICAL_FIELDS = "categorical_fields"; + private final String jobId; private final long rows; private final int cols; private final ByteSizeValue memoryLimit; @@ -32,8 +34,9 @@ public class AnalyticsProcessConfig implements ToXContentObject { private final Set categoricalFields; private final DataFrameAnalysis analysis; - public AnalyticsProcessConfig(long rows, int cols, ByteSizeValue memoryLimit, int threads, String resultsField, + public AnalyticsProcessConfig(String jobId, long rows, int cols, ByteSizeValue memoryLimit, int threads, String resultsField, Set categoricalFields, DataFrameAnalysis analysis) { + this.jobId = Objects.requireNonNull(jobId); this.rows = rows; this.cols = cols; this.memoryLimit = Objects.requireNonNull(memoryLimit); @@ -54,6 +57,7 @@ public class AnalyticsProcessConfig implements ToXContentObject { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); + builder.field(JOB_ID, jobId); builder.field(ROWS, rows); builder.field(COLS, cols); builder.field(MEMORY_LIMIT, memoryLimit.getBytes()); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/AnalyticsProcessFactory.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/AnalyticsProcessFactory.java index e72d1ad51a5..0be90b2e0ae 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/AnalyticsProcessFactory.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/AnalyticsProcessFactory.java @@ -5,6 +5,8 @@ */ package org.elasticsearch.xpack.ml.dataframe.process; +import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsConfig; + import java.util.concurrent.ExecutorService; import java.util.function.Consumer; @@ -13,12 +15,12 @@ public interface AnalyticsProcessFactory { /** * Create an implementation of {@link AnalyticsProcess} * - * @param jobId The job id + * @param config The data frame analytics config * @param analyticsProcessConfig The process configuration - * @param executorService Executor service used to start the async tasks a job needs to operate the analytical process - * @param onProcessCrash Callback to execute if the process stops unexpectedly + * @param executorService Executor service used to start the async tasks a job needs to operate the analytical process + * @param onProcessCrash Callback to execute if the process stops unexpectedly * @return The process */ - AnalyticsProcess createAnalyticsProcess(String jobId, AnalyticsProcessConfig 
analyticsProcessConfig, + AnalyticsProcess createAnalyticsProcess(DataFrameAnalyticsConfig config, AnalyticsProcessConfig analyticsProcessConfig, ExecutorService executorService, Consumer onProcessCrash); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/AnalyticsProcessManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/AnalyticsProcessManager.java index 245afa1cbab..ccb65fa2156 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/AnalyticsProcessManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/AnalyticsProcessManager.java @@ -172,9 +172,10 @@ public class AnalyticsProcessManager { process.writeRecord(headerRecord); } - private AnalyticsProcess createProcess(DataFrameAnalyticsTask task, AnalyticsProcessConfig analyticsProcessConfig) { + private AnalyticsProcess createProcess(DataFrameAnalyticsTask task, DataFrameAnalyticsConfig config, + AnalyticsProcessConfig analyticsProcessConfig) { ExecutorService executorService = threadPool.executor(MachineLearning.JOB_COMMS_THREAD_POOL_NAME); - AnalyticsProcess process = processFactory.createAnalyticsProcess(task.getParams().getId(), analyticsProcessConfig, + AnalyticsProcess process = processFactory.createAnalyticsProcess(config, analyticsProcessConfig, executorService, onProcessCrash(task)); if (process.isProcessAlive() == false) { throw ExceptionsHelper.serverError("Failed to start data frame analytics process"); @@ -289,7 +290,7 @@ public class AnalyticsProcessManager { LOGGER.info("[{}] no data found to analyze. Will not start analytics native process.", config.getId()); return false; } - process = createProcess(task, analyticsProcessConfig); + process = createProcess(task, config, analyticsProcessConfig); DataFrameRowsJoiner dataFrameRowsJoiner = new DataFrameRowsJoiner(config.getId(), client, dataExtractorFactory.newExtractor(true)); resultProcessor = new AnalyticsResultProcessor(id, dataFrameRowsJoiner, this::isProcessKilled, task.getProgressTracker()); @@ -299,7 +300,7 @@ public class AnalyticsProcessManager { private AnalyticsProcessConfig createProcessConfig(DataFrameAnalyticsConfig config, DataFrameDataExtractor dataExtractor) { DataFrameDataExtractor.DataSummary dataSummary = dataExtractor.collectDataSummary(); Set categoricalFields = dataExtractor.getCategoricalFields(); - AnalyticsProcessConfig processConfig = new AnalyticsProcessConfig(dataSummary.rows, dataSummary.cols, + AnalyticsProcessConfig processConfig = new AnalyticsProcessConfig(config.getId(), dataSummary.rows, dataSummary.cols, config.getModelMemoryLimit(), 1, config.getDest().getResultsField(), categoricalFields, config.getAnalysis()); return processConfig; } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/MemoryUsageEstimationProcessManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/MemoryUsageEstimationProcessManager.java index 6512dc075d7..ed42f86cc4b 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/MemoryUsageEstimationProcessManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/MemoryUsageEstimationProcessManager.java @@ -63,6 +63,7 @@ public class MemoryUsageEstimationProcessManager { } AnalyticsProcessConfig processConfig = new AnalyticsProcessConfig( + jobId, dataSummary.rows, dataSummary.cols, // For memory estimation the model memory limit 
here should be set high enough not to trigger an error when C++ code @@ -74,7 +75,7 @@ public class MemoryUsageEstimationProcessManager { config.getAnalysis()); AnalyticsProcess process = processFactory.createAnalyticsProcess( - jobId, + config, processConfig, executorServiceForProcess, // The handler passed here will never be called as AbstractNativeProcess.detectCrash method returns early when diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/NativeAnalyticsProcessFactory.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/NativeAnalyticsProcessFactory.java index 6aad810959f..9662298308e 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/NativeAnalyticsProcessFactory.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/NativeAnalyticsProcessFactory.java @@ -7,14 +7,17 @@ package org.elasticsearch.xpack.ml.dataframe.process; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.elasticsearch.client.Client; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.env.Environment; +import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsConfig; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import org.elasticsearch.xpack.ml.MachineLearning; import org.elasticsearch.xpack.ml.dataframe.process.results.AnalyticsResult; +import org.elasticsearch.xpack.ml.process.IndexingStateProcessor; import org.elasticsearch.xpack.ml.process.NativeController; import org.elasticsearch.xpack.ml.process.ProcessPipes; import org.elasticsearch.xpack.ml.utils.NamedPipeHelper; @@ -34,12 +37,14 @@ public class NativeAnalyticsProcessFactory implements AnalyticsProcessFactory onProcessCrash) { + public NativeAnalyticsProcess createAnalyticsProcess(DataFrameAnalyticsConfig config, AnalyticsProcessConfig analyticsProcessConfig, + ExecutorService executorService, Consumer onProcessCrash) { + String jobId = config.getId(); List filesToDelete = new ArrayList<>(); ProcessPipes processPipes = new ProcessPipes(env, NAMED_PIPE_HELPER, AnalyticsBuilder.ANALYTICS, jobId, - true, false, true, true, false, false); + true, false, true, true, false, config.getAnalysis().persistsState()); // The extra 2 are for the checksum and the control field int numberOfFields = analyticsProcessConfig.cols() + 2; @@ -67,7 +73,7 @@ public class NativeAnalyticsProcessFactory implements AnalyticsProcessFactory filesToDelete, ProcessPipes processPipes) { AnalyticsBuilder analyticsBuilder = diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/NativeMemoryUsageEstimationProcessFactory.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/NativeMemoryUsageEstimationProcessFactory.java index 3c573701f36..f635e43a63b 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/NativeMemoryUsageEstimationProcessFactory.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/NativeMemoryUsageEstimationProcessFactory.java @@ -12,6 +12,7 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.core.internal.io.IOUtils; import 
org.elasticsearch.env.Environment; +import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsConfig; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import org.elasticsearch.xpack.ml.MachineLearning; import org.elasticsearch.xpack.ml.dataframe.process.results.MemoryUsageEstimationResult; @@ -52,18 +53,18 @@ public class NativeMemoryUsageEstimationProcessFactory implements AnalyticsProce @Override public NativeMemoryUsageEstimationProcess createAnalyticsProcess( - String jobId, + DataFrameAnalyticsConfig config, AnalyticsProcessConfig analyticsProcessConfig, ExecutorService executorService, Consumer onProcessCrash) { List filesToDelete = new ArrayList<>(); ProcessPipes processPipes = new ProcessPipes( - env, NAMED_PIPE_HELPER, AnalyticsBuilder.ANALYTICS, jobId, true, false, false, true, false, false); + env, NAMED_PIPE_HELPER, AnalyticsBuilder.ANALYTICS, config.getId(), true, false, false, true, false, false); - createNativeProcess(jobId, analyticsProcessConfig, filesToDelete, processPipes); + createNativeProcess(config.getId(), analyticsProcessConfig, filesToDelete, processPipes); NativeMemoryUsageEstimationProcess process = new NativeMemoryUsageEstimationProcess( - jobId, + config.getId(), processPipes.getLogStream().get(), // Memory estimation process does not use the input pipe, hence null. null, diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/NativeAutodetectProcessFactory.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/NativeAutodetectProcessFactory.java index a683f856c8c..26f9353639c 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/NativeAutodetectProcessFactory.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/NativeAutodetectProcessFactory.java @@ -17,7 +17,7 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import org.elasticsearch.xpack.ml.MachineLearning; -import org.elasticsearch.xpack.ml.job.process.autodetect.output.AutodetectStateProcessor; +import org.elasticsearch.xpack.ml.process.IndexingStateProcessor; import org.elasticsearch.xpack.ml.job.process.autodetect.params.AutodetectParams; import org.elasticsearch.xpack.ml.job.results.AutodetectResult; import org.elasticsearch.xpack.ml.process.NativeController; @@ -77,7 +77,7 @@ public class NativeAutodetectProcessFactory implements AutodetectProcessFactory // The extra 1 is the control field int numberOfFields = job.allInputFields().size() + (includeTokensField ? 
1 : 0) + 1; - AutodetectStateProcessor stateProcessor = new AutodetectStateProcessor(client, job.getId()); + IndexingStateProcessor stateProcessor = new IndexingStateProcessor(client, job.getId()); ProcessResultsParser resultsParser = new ProcessResultsParser<>(AutodetectResult.PARSER); NativeAutodetectProcess autodetect = new NativeAutodetectProcess( job.getId(), processPipes.getLogStream().get(), processPipes.getProcessInStream().get(), diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/notifications/DataFrameAnalyticsAuditor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/notifications/DataFrameAnalyticsAuditor.java new file mode 100644 index 00000000000..1c9be78d241 --- /dev/null +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/notifications/DataFrameAnalyticsAuditor.java @@ -0,0 +1,20 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ml.notifications; + +import org.elasticsearch.client.Client; +import org.elasticsearch.xpack.core.common.notifications.AbstractAuditor; +import org.elasticsearch.xpack.core.ml.notifications.AuditorField; +import org.elasticsearch.xpack.core.ml.notifications.DataFrameAnalyticsAuditMessage; + +import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; + +public class DataFrameAnalyticsAuditor extends AbstractAuditor { + + public DataFrameAnalyticsAuditor(Client client, String nodeName) { + super(client, nodeName, AuditorField.NOTIFICATIONS_INDEX, ML_ORIGIN, DataFrameAnalyticsAuditMessage::new); + } +} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutodetectStateProcessor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/IndexingStateProcessor.java similarity index 91% rename from x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutodetectStateProcessor.java rename to x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/IndexingStateProcessor.java index 1a418bfb2a1..9bfd22500e0 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutodetectStateProcessor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/IndexingStateProcessor.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.ml.job.process.autodetect.output; +package org.elasticsearch.xpack.ml.process; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -15,7 +15,6 @@ import org.elasticsearch.common.bytes.CompositeBytesReference; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; -import org.elasticsearch.xpack.ml.process.StateProcessor; import java.io.IOException; import java.io.InputStream; @@ -25,18 +24,18 @@ import java.util.List; import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; /** - * Reads the autodetect state and persists via a bulk request + * Reads state documents of a stream, splits them and persists to an index via a bulk request */ -public class AutodetectStateProcessor implements StateProcessor { +public class IndexingStateProcessor implements StateProcessor { - private static final Logger LOGGER = LogManager.getLogger(AutodetectStateProcessor.class); + private static final Logger LOGGER = LogManager.getLogger(IndexingStateProcessor.class); private static final int READ_BUF_SIZE = 8192; private final Client client; private final String jobId; - public AutodetectStateProcessor(Client client, String jobId) { + public IndexingStateProcessor(Client client, String jobId) { this.client = client; this.jobId = jobId; } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/MlMemoryTracker.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/MlMemoryTracker.java index 1aaaf1a65d2..5b5cd008f05 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/MlMemoryTracker.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/MlMemoryTracker.java @@ -36,6 +36,7 @@ import java.util.Collections; import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.TreeMap; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.Phaser; @@ -352,10 +353,10 @@ public class MlMemoryTracker implements LocalNodeMasterListener { return; } - String startedJobIds = mlDataFrameAnalyticsJobTasks.stream() - .map(task -> ((StartDataFrameAnalyticsAction.TaskParams) task.getParams()).getId()).sorted().collect(Collectors.joining(",")); + Set jobsWithTasks = mlDataFrameAnalyticsJobTasks.stream().map( + task -> ((StartDataFrameAnalyticsAction.TaskParams) task.getParams()).getId()).collect(Collectors.toSet()); - configProvider.getMultiple(startedJobIds, false, ActionListener.wrap( + configProvider.getConfigsForJobsWithTasksLeniently(jobsWithTasks, ActionListener.wrap( analyticsConfigs -> { for (DataFrameAnalyticsConfig analyticsConfig : analyticsConfigs) { memoryRequirementByDataFrameAnalyticsJob.put(analyticsConfig.getId(), diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedTimingStatsReporterTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedTimingStatsReporterTests.java index fff735418be..62a4b4ef4d4 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedTimingStatsReporterTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedTimingStatsReporterTests.java @@ -132,6 +132,15 @@ public class DatafeedTimingStatsReporterTests extends ESTestCase { verifyNoMoreInteractions(timingStatsPersister); } + public void 
testDisallowPersisting() { + DatafeedTimingStatsReporter reporter = createReporter(createDatafeedTimingStats(JOB_ID, 0, 0, 0.0)); + reporter.disallowPersisting(); + // This call would normally trigger persisting but because of the "disallowPersisting" call above it will not. + reporter.reportSearchDuration(ONE_SECOND); + + verifyZeroInteractions(timingStatsPersister); + } + public void testTimingStatsDifferSignificantly() { assertThat( DatafeedTimingStatsReporter.differSignificantly( diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/process/AnalyticsResultProcessorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/process/AnalyticsResultProcessorTests.java index 7f26293cabf..60bfd0eb3e3 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/process/AnalyticsResultProcessorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/process/AnalyticsResultProcessorTests.java @@ -88,7 +88,7 @@ public class AnalyticsResultProcessorTests extends ESTestCase { private void givenDataFrameRows(int rows) { AnalyticsProcessConfig config = new AnalyticsProcessConfig( - rows, 1, ByteSizeValue.ZERO, 1, "ml", Collections.emptySet(), mock(DataFrameAnalysis.class)); + "job_id", rows, 1, ByteSizeValue.ZERO, 1, "ml", Collections.emptySet(), mock(DataFrameAnalysis.class)); when(process.getConfig()).thenReturn(config); } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/process/MemoryUsageEstimationProcessManagerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/process/MemoryUsageEstimationProcessManagerTests.java index 9790e0618da..5be7a03b851 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/process/MemoryUsageEstimationProcessManagerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/process/MemoryUsageEstimationProcessManagerTests.java @@ -29,7 +29,6 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.mockito.Matchers.any; import static org.mockito.Matchers.anyBoolean; -import static org.mockito.Matchers.anyString; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.inOrder; import static org.mockito.Mockito.mock; @@ -68,7 +67,7 @@ public class MemoryUsageEstimationProcessManagerTests extends ESTestCase { process = mock(AnalyticsProcess.class); when(process.readAnalyticsResults()).thenReturn(Arrays.asList(PROCESS_RESULT).iterator()); processFactory = mock(AnalyticsProcessFactory.class); - when(processFactory.createAnalyticsProcess(anyString(), any(), any(), any())).thenReturn(process); + when(processFactory.createAnalyticsProcess(any(), any(), any(), any())).thenReturn(process); dataExtractor = mock(DataFrameDataExtractor.class); when(dataExtractor.collectDataSummary()).thenReturn(new DataFrameDataExtractor.DataSummary(NUM_ROWS, NUM_COLS)); dataExtractorFactory = mock(DataFrameDataExtractorFactory.class); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/AnnotationIndexIT.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/AnnotationIndexIT.java index a797053d013..7eb23b0a29a 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/AnnotationIndexIT.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/AnnotationIndexIT.java @@ -56,7 +56,7 @@ public class AnnotationIndexIT 
extends MlSingleNodeTestCase { AnomalyDetectionAuditor auditor = new AnomalyDetectionAuditor(client(), "node_1"); auditor.info("whatever", "blah"); - // Creating a document in the .ml-notifications index should cause .ml-annotations + // Creating a document in the .ml-notifications-000001 index should cause .ml-annotations // to be created, as it should get created as soon as any other ML index exists assertBusy(() -> { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsPersisterTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsPersisterTests.java index 2692ed3552d..1c771cd7f6b 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsPersisterTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsPersisterTests.java @@ -16,6 +16,7 @@ import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.client.Client; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedTimingStats; @@ -249,7 +250,7 @@ public class JobResultsPersisterTests extends ESTestCase { // Take the listener passed to client::index as 2nd argument ActionListener listener = (ActionListener) invocationOnMock.getArguments()[1]; // Handle the response on the listener - listener.onResponse(new IndexResponse(null, null, null, 0, 0, 0, false)); + listener.onResponse(new IndexResponse(new ShardId("test", "test", 0), "_doc", "test", 0, 0, 0, false)); return null; }) .when(client).index(any(), any(ActionListener.class)); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/NativeAutodetectProcessTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/NativeAutodetectProcessTests.java index 0fc7c83b54c..9cd6343eab7 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/NativeAutodetectProcessTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/NativeAutodetectProcessTests.java @@ -7,7 +7,7 @@ package org.elasticsearch.xpack.ml.job.process.autodetect; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.ml.job.config.ModelPlotConfig; -import org.elasticsearch.xpack.ml.job.process.autodetect.output.AutodetectStateProcessor; +import org.elasticsearch.xpack.ml.process.IndexingStateProcessor; import org.elasticsearch.xpack.ml.job.process.autodetect.params.DataLoadParams; import org.elasticsearch.xpack.ml.job.process.autodetect.params.FlushJobParams; import org.elasticsearch.xpack.ml.job.process.autodetect.params.TimeRange; @@ -62,7 +62,7 @@ public class NativeAutodetectProcessTests extends ESTestCase { mock(OutputStream.class), outputStream, mock(OutputStream.class), NUMBER_FIELDS, null, new ProcessResultsParser<>(AutodetectResult.PARSER), mock(Consumer.class))) { - process.start(executorService, mock(AutodetectStateProcessor.class), mock(InputStream.class)); + process.start(executorService, mock(IndexingStateProcessor.class), mock(InputStream.class)); ZonedDateTime startTime = process.getProcessStartTime(); Thread.sleep(500); @@ -85,7 +85,7 @@ public class NativeAutodetectProcessTests extends ESTestCase { try 
(NativeAutodetectProcess process = new NativeAutodetectProcess("foo", logStream, bos, outputStream, mock(OutputStream.class), NUMBER_FIELDS, Collections.emptyList(), new ProcessResultsParser<>(AutodetectResult.PARSER), mock(Consumer.class))) { - process.start(executorService, mock(AutodetectStateProcessor.class), mock(InputStream.class)); + process.start(executorService, mock(IndexingStateProcessor.class), mock(InputStream.class)); process.writeRecord(record); process.flushStream(); @@ -120,7 +120,7 @@ public class NativeAutodetectProcessTests extends ESTestCase { try (NativeAutodetectProcess process = new NativeAutodetectProcess("foo", logStream, bos, outputStream, mock(OutputStream.class), NUMBER_FIELDS, Collections.emptyList(), new ProcessResultsParser<>(AutodetectResult.PARSER), mock(Consumer.class))) { - process.start(executorService, mock(AutodetectStateProcessor.class), mock(InputStream.class)); + process.start(executorService, mock(IndexingStateProcessor.class), mock(InputStream.class)); FlushJobParams params = FlushJobParams.builder().build(); process.flushJob(params); @@ -170,7 +170,7 @@ public class NativeAutodetectProcessTests extends ESTestCase { try (NativeAutodetectProcess process = new NativeAutodetectProcess("foo", logStream, bos, outputStream, mock(OutputStream.class), NUMBER_FIELDS, Collections.emptyList(), new ProcessResultsParser<>(AutodetectResult.PARSER), mock(Consumer.class))) { - process.start(executorService, mock(AutodetectStateProcessor.class), mock(InputStream.class)); + process.start(executorService, mock(IndexingStateProcessor.class), mock(InputStream.class)); writeFunction.accept(process); process.writeUpdateModelPlotMessage(new ModelPlotConfig()); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutodetectStateProcessorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/IndexingStateProcessorTests.java similarity index 95% rename from x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutodetectStateProcessorTests.java rename to x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/IndexingStateProcessorTests.java index 4f5477a75f8..f574782746c 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutodetectStateProcessorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/IndexingStateProcessorTests.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.ml.job.process.autodetect.output; +package org.elasticsearch.xpack.ml.process; import com.carrotsearch.randomizedtesting.annotations.Timeout; import org.elasticsearch.action.ActionFuture; @@ -36,7 +36,7 @@ import static org.mockito.Mockito.when; /** * Tests for reading state from the native process. 
*/ -public class AutodetectStateProcessorTests extends ESTestCase { +public class IndexingStateProcessorTests extends ESTestCase { private static final String STATE_SAMPLE = "" + "{\"index\": {\"_index\": \"test\", \"_id\": \"1\"}}\n" @@ -55,14 +55,14 @@ public class AutodetectStateProcessorTests extends ESTestCase { private static final int LARGE_DOC_SIZE = 1000000; private Client client; - private AutodetectStateProcessor stateProcessor; + private IndexingStateProcessor stateProcessor; @Before public void initialize() throws IOException { client = mock(Client.class); @SuppressWarnings("unchecked") ActionFuture bulkResponseFuture = mock(ActionFuture.class); - stateProcessor = spy(new AutodetectStateProcessor(client, JOB_ID)); + stateProcessor = spy(new IndexingStateProcessor(client, JOB_ID)); when(client.bulk(any(BulkRequest.class))).thenReturn(bulkResponseFuture); ThreadPool threadPool = mock(ThreadPool.class); when(client.threadPool()).thenReturn(threadPool); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/MlMemoryTrackerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/MlMemoryTrackerTests.java index ed17be1f3dc..c27b8bb07d1 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/MlMemoryTrackerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/MlMemoryTrackerTests.java @@ -30,6 +30,7 @@ import org.junit.Before; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.concurrent.ExecutorService; @@ -39,7 +40,6 @@ import java.util.function.Consumer; import static org.hamcrest.CoreMatchers.instanceOf; import static org.mockito.Matchers.any; -import static org.mockito.Matchers.anyBoolean; import static org.mockito.Matchers.eq; import static org.mockito.Mockito.anyString; import static org.mockito.Mockito.doAnswer; @@ -122,7 +122,7 @@ public class MlMemoryTrackerTests extends ESTestCase { String jobId = "job" + i; verify(jobResultsProvider, times(1)).getEstablishedMemoryUsage(eq(jobId), any(), any(), any(), any()); } - verify(configProvider, times(1)).getMultiple(eq(String.join(",", allIds)), eq(false), any()); + verify(configProvider, times(1)).getConfigsForJobsWithTasksLeniently(eq(new HashSet<>(allIds)), any()); } else { verify(jobResultsProvider, never()).getEstablishedMemoryUsage(anyString(), any(), any(), any(), any()); } @@ -161,10 +161,10 @@ public class MlMemoryTrackerTests extends ESTestCase { doAnswer(invocation -> { @SuppressWarnings("unchecked") ActionListener> listener = - (ActionListener>) invocation.getArguments()[2]; + (ActionListener>) invocation.getArguments()[1]; listener.onFailure(new IllegalArgumentException("computer says no")); return null; - }).when(configProvider).getMultiple(anyString(), anyBoolean(), any()); + }).when(configProvider).getConfigsForJobsWithTasksLeniently(any(), any()); AtomicBoolean gotErrorResponse = new AtomicBoolean(false); memoryTracker.refresh(persistentTasks, @@ -177,10 +177,10 @@ public class MlMemoryTrackerTests extends ESTestCase { doAnswer(invocation -> { @SuppressWarnings("unchecked") ActionListener> listener = - (ActionListener>) invocation.getArguments()[2]; + (ActionListener>) invocation.getArguments()[1]; listener.onResponse(Collections.emptyList()); return null; - }).when(configProvider).getMultiple(anyString(), anyBoolean(), any()); + }).when(configProvider).getConfigsForJobsWithTasksLeniently(any(), 
any()); AtomicBoolean gotSuccessResponse = new AtomicBoolean(false); memoryTracker.refresh(persistentTasks, diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupJobTask.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupJobTask.java index 668d62a9718..4d2fb8d5d7a 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupJobTask.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupJobTask.java @@ -22,7 +22,9 @@ import org.elasticsearch.persistent.AllocatedPersistentTask; import org.elasticsearch.persistent.PersistentTaskState; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.persistent.PersistentTasksExecutor; +import org.elasticsearch.persistent.PersistentTasksService; import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.tasks.TaskManager; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.indexing.IndexerState; @@ -159,8 +161,11 @@ public class RollupJobTask extends AllocatedPersistentTask implements SchedulerE private final RollupJob job; private final SchedulerEngine schedulerEngine; private final ThreadPool threadPool; - private final RollupIndexer indexer; - private AtomicBoolean upgradedDocumentID; + private final Client client; + private final IndexerState initialIndexerState; + private final Map initialPosition; + private RollupIndexer indexer; + private final AtomicBoolean upgradedDocumentID = new AtomicBoolean(false); RollupJobTask(long id, String type, String action, TaskId parentTask, RollupJob job, RollupJobStatus state, Client client, SchedulerEngine schedulerEngine, ThreadPool threadPool, Map headers) { @@ -168,35 +173,15 @@ public class RollupJobTask extends AllocatedPersistentTask implements SchedulerE this.job = job; this.schedulerEngine = schedulerEngine; this.threadPool = threadPool; - - // We can assume the new ID scheme only for new jobs - this.upgradedDocumentID = new AtomicBoolean(true); - - // If status is not null, we are resuming rather than starting fresh. - Map initialPosition = null; - IndexerState initialState = IndexerState.STOPPED; - if (state != null) { - final IndexerState existingState = state.getIndexerState(); - logger.debug("We have existing state, setting state to [" + existingState + "] " + - "and current position to [" + state.getPosition() + "] for job [" + job.getConfig().getId() + "]"); - if (existingState.equals(IndexerState.INDEXING)) { - /* - * If we were indexing, we have to reset back to STARTED otherwise the indexer will be "stuck" thinking - * it is indexing but without the actual indexing thread running. - */ - initialState = IndexerState.STARTED; - - } else if (existingState.equals(IndexerState.ABORTING) || existingState.equals(IndexerState.STOPPING)) { - // It shouldn't be possible to persist ABORTING, but if for some reason it does, - // play it safe and restore the job as STOPPED. An admin will have to clean it up, - // but it won't be running, and won't delete itself either. Safest option. - // If we were STOPPING, that means it persisted but was killed before finally stopped... 
so ok - to restore as STOPPED - initialState = IndexerState.STOPPED; - } else { - initialState = existingState; - } - initialPosition = state.getPosition(); + this.client = client; + if (state == null) { + this.initialIndexerState = null; + this.initialPosition = null; + // We can assume the new ID scheme only for new jobs + this.upgradedDocumentID.set(true); + } else { + this.initialIndexerState = state.getIndexerState(); + this.initialPosition = state.getPosition(); // Since we have state, we are resuming a job/checkpoint. Although we are resuming // from something that was checkpointed, we can't guarantee it was the _final_ checkpoint @@ -207,8 +192,39 @@ // be true if it actually finished a full checkpoint. this.upgradedDocumentID.set(state.isUpgradedDocumentID()); } + + } + + @Override + protected void init(PersistentTasksService persistentTasksService, TaskManager taskManager, + String persistentTaskId, long allocationId) { + super.init(persistentTasksService, taskManager, persistentTaskId, allocationId); + + // If status is not null, we are resuming rather than starting fresh. + IndexerState initialState = IndexerState.STOPPED; + if (initialIndexerState != null) { + logger.debug("We have existing state, setting state to [" + initialIndexerState + "] " + + "and current position to [" + initialPosition + "] for job [" + job.getConfig().getId() + "]"); + if (initialIndexerState.equals(IndexerState.INDEXING)) { + /* + * If we were indexing, we have to reset back to STARTED otherwise the indexer will be "stuck" thinking + * it is indexing but without the actual indexing thread running. + */ + initialState = IndexerState.STARTED; + + } else if (initialIndexerState.equals(IndexerState.ABORTING) || initialIndexerState.equals(IndexerState.STOPPING)) { + // It shouldn't be possible to persist ABORTING, but if for some reason it does, + // play it safe and restore the job as STOPPED. An admin will have to clean it up, + // but it won't be running, and won't delete itself either. Safest option. + // If we were STOPPING, that means it persisted but was killed before finally stopped... 
so ok + // to restore as STOPPED + initialState = IndexerState.STOPPED; + } else { + initialState = initialIndexerState; + } + } this.indexer = new ClientRollupPageManager(job, initialState, initialPosition, - new ParentTaskAssigningClient(client, new TaskId(getPersistentTaskId())), upgradedDocumentID); + new ParentTaskAssigningClient(client, getParentTaskId()), upgradedDocumentID); } @Override diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java index c4fb6d8f348..7f5a8232a6d 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java @@ -32,6 +32,7 @@ import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.common.Rounding; import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.mapper.ContentPath; import org.elasticsearch.index.mapper.DateFieldMapper; @@ -91,7 +92,7 @@ public class RollupIndexerIndexingTests extends AggregatorTestCase { private void setup() { settings = createIndexSettings(); queryShardContext = new QueryShardContext(0, settings, - null, null, null, null, null, null, + BigArrays.NON_RECYCLING_INSTANCE, null, null, null, null, null, null, null, null, null, () -> 0L, null); } diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupJobTaskTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupJobTaskTests.java index 59073e763c2..1ebc65ef83a 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupJobTaskTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupJobTaskTests.java @@ -17,6 +17,7 @@ import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.search.aggregations.Aggregations; import org.elasticsearch.search.aggregations.bucket.composite.CompositeAggregation; import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.tasks.TaskManager; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; @@ -67,8 +68,10 @@ public class RollupJobTaskTests extends ESTestCase { Client client = mock(Client.class); when(client.settings()).thenReturn(Settings.EMPTY); SchedulerEngine schedulerEngine = new SchedulerEngine(SETTINGS, Clock.systemUTC()); - RollupJobTask task = new RollupJobTask(1, "type", "action", new TaskId("node", 123), job, + TaskId taskId = new TaskId("node", 123); + RollupJobTask task = new RollupJobTask(1, "type", "action", taskId, job, status, client, schedulerEngine, pool, Collections.emptyMap()); + task.init(null, mock(TaskManager.class), taskId.toString(), 123); assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STOPPED)); assertThat(((RollupJobStatus)task.getStatus()).getPosition().size(), equalTo(1)); assertTrue(((RollupJobStatus)task.getStatus()).getPosition().containsKey("foo")); @@ -80,8 +83,10 @@ public class RollupJobTaskTests extends ESTestCase { Client client = mock(Client.class); when(client.settings()).thenReturn(Settings.EMPTY); 
SchedulerEngine schedulerEngine = new SchedulerEngine(SETTINGS, Clock.systemUTC()); + TaskId taskId = new TaskId("node", 123); RollupJobTask task = new RollupJobTask(1, "type", "action", new TaskId("node", 123), job, status, client, schedulerEngine, pool, Collections.emptyMap()); + task.init(null, mock(TaskManager.class), taskId.toString(), 123); assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STOPPED)); assertThat(((RollupJobStatus)task.getStatus()).getPosition().size(), equalTo(1)); assertTrue(((RollupJobStatus)task.getStatus()).getPosition().containsKey("foo")); @@ -93,8 +98,10 @@ public class RollupJobTaskTests extends ESTestCase { Client client = mock(Client.class); when(client.settings()).thenReturn(Settings.EMPTY); SchedulerEngine schedulerEngine = new SchedulerEngine(SETTINGS, Clock.systemUTC()); - RollupJobTask task = new RollupJobTask(1, "type", "action", new TaskId("node", 123), job, + TaskId taskId = new TaskId("node", 123); + RollupJobTask task = new RollupJobTask(1, "type", "action", taskId, job, status, client, schedulerEngine, pool, Collections.emptyMap()); + task.init(null, mock(TaskManager.class), taskId.toString(), 123); assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STOPPED)); assertThat(((RollupJobStatus)task.getStatus()).getPosition().size(), equalTo(1)); assertTrue(((RollupJobStatus)task.getStatus()).getPosition().containsKey("foo")); @@ -106,8 +113,10 @@ public class RollupJobTaskTests extends ESTestCase { Client client = mock(Client.class); when(client.settings()).thenReturn(Settings.EMPTY); SchedulerEngine schedulerEngine = new SchedulerEngine(SETTINGS, Clock.systemUTC()); - RollupJobTask task = new RollupJobTask(1, "type", "action", new TaskId("node", 123), job, + TaskId taskId = new TaskId("node", 123); + RollupJobTask task = new RollupJobTask(1, "type", "action", taskId, job, status, client, schedulerEngine, pool, Collections.emptyMap()); + task.init(null, mock(TaskManager.class), taskId.toString(), 123); assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STARTED)); assertThat(((RollupJobStatus)task.getStatus()).getPosition().size(), equalTo(1)); assertTrue(((RollupJobStatus)task.getStatus()).getPosition().containsKey("foo")); @@ -119,12 +128,13 @@ public class RollupJobTaskTests extends ESTestCase { Client client = mock(Client.class); when(client.settings()).thenReturn(Settings.EMPTY); SchedulerEngine schedulerEngine = new SchedulerEngine(SETTINGS, Clock.systemUTC()); - RollupJobTask task = new RollupJobTask(1, "type", "action", new TaskId("node", 123), job, + TaskId taskId = new TaskId("node", 123); + RollupJobTask task = new RollupJobTask(1, "type", "action", taskId, job, status, client, schedulerEngine, pool, Collections.emptyMap()); + task.init(null, mock(TaskManager.class), taskId.toString(), 123); assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STARTED)); assertThat(((RollupJobStatus)task.getStatus()).getPosition().size(), equalTo(1)); assertTrue(((RollupJobStatus)task.getStatus()).getPosition().containsKey("foo")); - assertFalse(((RollupJobStatus) task.getStatus()).isUpgradedDocumentID()); } public void testInitialStatusIndexingNewID() { @@ -133,12 +143,13 @@ public class RollupJobTaskTests extends ESTestCase { Client client = mock(Client.class); when(client.settings()).thenReturn(Settings.EMPTY); SchedulerEngine schedulerEngine = new SchedulerEngine(SETTINGS, Clock.systemUTC()); - RollupJobTask task = 
new RollupJobTask(1, "type", "action", new TaskId("node", 123), job, + TaskId taskId = new TaskId("node", 123); + RollupJobTask task = new RollupJobTask(1, "type", "action", taskId, job, status, client, schedulerEngine, pool, Collections.emptyMap()); + task.init(null, mock(TaskManager.class), taskId.toString(), 123); assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STARTED)); assertThat(((RollupJobStatus)task.getStatus()).getPosition().size(), equalTo(1)); assertTrue(((RollupJobStatus)task.getStatus()).getPosition().containsKey("foo")); - assertTrue(((RollupJobStatus) task.getStatus()).isUpgradedDocumentID()); } public void testNoInitialStatus() { @@ -146,11 +157,12 @@ public class RollupJobTaskTests extends ESTestCase { Client client = mock(Client.class); when(client.settings()).thenReturn(Settings.EMPTY); SchedulerEngine schedulerEngine = new SchedulerEngine(SETTINGS, Clock.systemUTC()); - RollupJobTask task = new RollupJobTask(1, "type", "action", new TaskId("node", 123), job, + TaskId taskId = new TaskId("node", 123); + RollupJobTask task = new RollupJobTask(1, "type", "action", taskId, job, null, client, schedulerEngine, pool, Collections.emptyMap()); + task.init(null, mock(TaskManager.class), taskId.toString(), 123); assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STOPPED)); assertNull(((RollupJobStatus)task.getStatus()).getPosition()); - assertTrue(((RollupJobStatus) task.getStatus()).isUpgradedDocumentID()); } public void testStartWhenStarted() throws InterruptedException { @@ -159,8 +171,10 @@ public class RollupJobTaskTests extends ESTestCase { Client client = mock(Client.class); when(client.settings()).thenReturn(Settings.EMPTY); SchedulerEngine schedulerEngine = new SchedulerEngine(SETTINGS, Clock.systemUTC()); - RollupJobTask task = new RollupJobTask(1, "type", "action", new TaskId("node", 123), job, + TaskId taskId = new TaskId("node", 123); + RollupJobTask task = new RollupJobTask(1, "type", "action", taskId, job, status, client, schedulerEngine, pool, Collections.emptyMap()); + task.init(null, mock(TaskManager.class), taskId.toString(), 123); assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STARTED)); assertThat(((RollupJobStatus)task.getStatus()).getPosition().size(), equalTo(1)); assertTrue(((RollupJobStatus)task.getStatus()).getPosition().containsKey("foo")); @@ -190,8 +204,9 @@ public class RollupJobTaskTests extends ESTestCase { SchedulerEngine schedulerEngine = mock(SchedulerEngine.class); AtomicInteger counter = new AtomicInteger(0); - RollupJobTask task = new RollupJobTask(1, "type", "action", new TaskId("node", 123), job, - null, client, schedulerEngine, pool, Collections.emptyMap()) { + TaskId taskId = new TaskId("node", 123); + RollupJobTask task = new RollupJobTask(1, "type", "action", taskId, job, + null, client, schedulerEngine, pool, Collections.emptyMap()) { @Override public void updatePersistentTaskState(PersistentTaskState taskState, ActionListener> listener) { @@ -209,6 +224,7 @@ public class RollupJobTaskTests extends ESTestCase { counter.incrementAndGet(); } }; + task.init(null, mock(TaskManager.class), taskId.toString(), 123); assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STOPPED)); assertNull(((RollupJobStatus)task.getStatus()).getPosition()); @@ -267,7 +283,8 @@ public class RollupJobTaskTests extends ESTestCase { Client client = mock(Client.class); 
when(client.settings()).thenReturn(Settings.EMPTY); SchedulerEngine schedulerEngine = mock(SchedulerEngine.class); - RollupJobTask task = new RollupJobTask(1, "type", "action", new TaskId("node", 123), job, + TaskId taskId = new TaskId("node", 123); + RollupJobTask task = new RollupJobTask(1, "type", "action", taskId, job, status, client, schedulerEngine, pool, Collections.emptyMap()) { @Override public void updatePersistentTaskState(PersistentTaskState taskState, @@ -278,6 +295,7 @@ public class RollupJobTaskTests extends ESTestCase { new PersistentTasksCustomMetaData.Assignment("foo", "foo"))); } }; + task.init(null, mock(TaskManager.class), taskId.toString(), 123); assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STOPPED)); assertThat(((RollupJobStatus)task.getStatus()).getPosition().size(), equalTo(1)); assertTrue(((RollupJobStatus)task.getStatus()).getPosition().containsKey("foo")); @@ -305,7 +323,8 @@ public class RollupJobTaskTests extends ESTestCase { Client client = mock(Client.class); when(client.settings()).thenReturn(Settings.EMPTY); SchedulerEngine schedulerEngine = mock(SchedulerEngine.class); - RollupJobTask task = new RollupJobTask(1, "type", "action", new TaskId("node", 123), job, + TaskId taskId = new TaskId("node", 123); + RollupJobTask task = new RollupJobTask(1, "type", "action", taskId, job, status, client, schedulerEngine, pool, Collections.emptyMap()) { @Override public void updatePersistentTaskState(PersistentTaskState taskState, @@ -316,6 +335,7 @@ public class RollupJobTaskTests extends ESTestCase { new PersistentTasksCustomMetaData.Assignment("foo", "foo"))); } }; + task.init(null, mock(TaskManager.class), taskId.toString(), 123); assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STOPPED)); assertThat(((RollupJobStatus)task.getStatus()).getPosition().size(), equalTo(1)); assertTrue(((RollupJobStatus)task.getStatus()).getPosition().containsKey("foo")); @@ -346,7 +366,8 @@ public class RollupJobTaskTests extends ESTestCase { when(client.settings()).thenReturn(Settings.EMPTY); when(client.threadPool()).thenReturn(pool); SchedulerEngine schedulerEngine = mock(SchedulerEngine.class); - RollupJobTask task = new RollupJobTask(1, "type", "action", new TaskId("node", 123), job, + TaskId taskId = new TaskId("node", 123); + RollupJobTask task = new RollupJobTask(1, "type", "action", taskId, job, null, client, schedulerEngine, pool, Collections.emptyMap()) { @Override public void updatePersistentTaskState(PersistentTaskState taskState, @@ -357,6 +378,7 @@ public class RollupJobTaskTests extends ESTestCase { new PersistentTasksCustomMetaData.Assignment("foo", "foo"))); } }; + task.init(null, mock(TaskManager.class), taskId.toString(), 123); assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STOPPED)); assertNull(((RollupJobStatus)task.getStatus()).getPosition()); @@ -414,7 +436,8 @@ public class RollupJobTaskTests extends ESTestCase { }).when(client).execute(anyObject(), anyObject(), anyObject()); SchedulerEngine schedulerEngine = mock(SchedulerEngine.class); - RollupJobTask task = new RollupJobTask(1, "type", "action", new TaskId("node", 123), job, + TaskId taskId = new TaskId("node", 123); + RollupJobTask task = new RollupJobTask(1, "type", "action", taskId, job, null, client, schedulerEngine, pool, Collections.emptyMap()) { @Override public void updatePersistentTaskState(PersistentTaskState taskState, @@ -435,6 +458,7 @@ public class RollupJobTaskTests extends 
ESTestCase { } }; + task.init(null, mock(TaskManager.class), taskId.toString(), 123); assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STOPPED)); assertNull(((RollupJobStatus)task.getStatus()).getPosition()); @@ -502,7 +526,8 @@ public class RollupJobTaskTests extends ESTestCase { }).when(client).execute(anyObject(), anyObject(), anyObject()); SchedulerEngine schedulerEngine = mock(SchedulerEngine.class); - RollupJobTask task = new RollupJobTask(1, "type", "action", new TaskId("node", 123), job, + TaskId taskId = new TaskId("node", 123); + RollupJobTask task = new RollupJobTask(1, "type", "action", taskId, job, null, client, schedulerEngine, pool, Collections.emptyMap()) { @Override public void updatePersistentTaskState(PersistentTaskState taskState, @@ -523,6 +548,7 @@ public class RollupJobTaskTests extends ESTestCase { } }; + task.init(null, mock(TaskManager.class), taskId.toString(), 123); assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STOPPED)); assertNull(((RollupJobStatus)task.getStatus()).getPosition()); @@ -591,7 +617,8 @@ public class RollupJobTaskTests extends ESTestCase { SchedulerEngine schedulerEngine = mock(SchedulerEngine.class); RollupJobStatus status = new RollupJobStatus(IndexerState.STOPPED, null, false); - RollupJobTask task = new RollupJobTask(1, "type", "action", new TaskId("node", 123), job, + TaskId taskId = new TaskId("node", 123); + RollupJobTask task = new RollupJobTask(1, "type", "action", taskId, job, status, client, schedulerEngine, pool, Collections.emptyMap()) { @Override public void updatePersistentTaskState(PersistentTaskState taskState, @@ -612,6 +639,7 @@ public class RollupJobTaskTests extends ESTestCase { } }; + task.init(null, mock(TaskManager.class), taskId.toString(), 123); assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STOPPED)); assertNull(((RollupJobStatus)task.getStatus()).getPosition()); @@ -646,8 +674,10 @@ public class RollupJobTaskTests extends ESTestCase { Client client = mock(Client.class); when(client.settings()).thenReturn(Settings.EMPTY); SchedulerEngine schedulerEngine = new SchedulerEngine(SETTINGS, Clock.systemUTC()); - RollupJobTask task = new RollupJobTask(1, "type", "action", new TaskId("node", 123), job, + TaskId taskId = new TaskId("node", 123); + RollupJobTask task = new RollupJobTask(1, "type", "action", taskId, job, status, client, schedulerEngine, pool, Collections.emptyMap()); + task.init(null, mock(TaskManager.class), taskId.toString(), 123); assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STOPPED)); CountDownLatch latch = new CountDownLatch(1); @@ -674,7 +704,8 @@ public class RollupJobTaskTests extends ESTestCase { SchedulerEngine schedulerEngine = mock(SchedulerEngine.class); AtomicInteger counter = new AtomicInteger(0); - RollupJobTask task = new RollupJobTask(1, "type", "action", new TaskId("node", 123), job, + TaskId taskId = new TaskId("node", 123); + RollupJobTask task = new RollupJobTask(1, "type", "action", taskId, job, null, client, schedulerEngine, pool, Collections.emptyMap()) { @Override public void updatePersistentTaskState(PersistentTaskState taskState, @@ -696,6 +727,7 @@ public class RollupJobTaskTests extends ESTestCase { } }; + task.init(null, mock(TaskManager.class), taskId.toString(), 123); assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STOPPED)); 
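For reference, the tests in this class now share the same construct-then-init sequence before reading status; a minimal sketch of that recurring pattern, reusing the mocks already set up in the surrounding tests (the cast mirrors the existing assertions):

    TaskId taskId = new TaskId("node", 123);
    RollupJobTask task = new RollupJobTask(1, "type", "action", taskId, job,
            status, client, schedulerEngine, pool, Collections.emptyMap());
    // init() is now called explicitly before getStatus() in these tests
    task.init(null, mock(TaskManager.class), taskId.toString(), 123);
    RollupJobStatus current = (RollupJobStatus) task.getStatus();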
assertNull(((RollupJobStatus)task.getStatus()).getPosition()); @@ -760,13 +792,15 @@ public class RollupJobTaskTests extends ESTestCase { // the task would end before stop could be called. But to help test out all pathways, // just in case, we can override markAsCompleted so it's a no-op and test how stop // handles the situation - RollupJobTask task = new RollupJobTask(1, "type", "action", new TaskId("node", 123), job, + TaskId taskId = new TaskId("node", 123); + RollupJobTask task = new RollupJobTask(1, "type", "action", taskId, job, status, client, schedulerEngine, pool, Collections.emptyMap()) { @Override public void markAsCompleted() { latch.countDown(); } }; + task.init(null, mock(TaskManager.class), taskId.toString(), 123); assertThat(((RollupJobStatus)task.getStatus()).getIndexerState(), equalTo(IndexerState.STOPPED)); task.onCancelled(); diff --git a/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertificateGenerateTool.java b/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertificateGenerateTool.java index a100afe33aa..9cf870bbdf0 100644 --- a/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertificateGenerateTool.java +++ b/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertificateGenerateTool.java @@ -529,7 +529,7 @@ public class CertificateGenerateTool extends EnvironmentAwareCommand { terminal.println(" the certificate and private key will also be included in the output file."); } terminal.println("* Information about each instance"); - terminal.println(" * An instance is any piece of the Elastic Stack that requires a SSL certificate."); + terminal.println(" * An instance is any piece of the Elastic Stack that requires an SSL certificate."); terminal.println(" Depending on your configuration, Elasticsearch, Logstash, Kibana, and Beats"); terminal.println(" may all require a certificate and private key."); terminal.println(" * The minimum required value for each instance is a name. This can simply be the"); diff --git a/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertificateTool.java b/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertificateTool.java index 53e3fadf168..e3a0f4e7112 100644 --- a/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertificateTool.java +++ b/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertificateTool.java @@ -149,7 +149,7 @@ public class CertificateTool extends LoggingAwareMultiCommand { "signing requests for use with SSL/TLS in the Elastic stack."; static final String INSTANCE_EXPLANATION = - " * An instance is any piece of the Elastic Stack that requires a SSL certificate.\n" + + " * An instance is any piece of the Elastic Stack that requires an SSL certificate.\n" + " Depending on your configuration, Elasticsearch, Logstash, Kibana, and Beats\n" + " may all require a certificate and private key.\n" + " * The minimum required value for each instance is a name. 
This can simply be the\n" + diff --git a/x-pack/plugin/security/licenses/httpclient-cache-4.5.10.jar.sha1 b/x-pack/plugin/security/licenses/httpclient-cache-4.5.10.jar.sha1 new file mode 100644 index 00000000000..3b1d8db6431 --- /dev/null +++ b/x-pack/plugin/security/licenses/httpclient-cache-4.5.10.jar.sha1 @@ -0,0 +1 @@ +b195778247a21e980cb9f80c41364dc0c38feaef \ No newline at end of file diff --git a/x-pack/plugin/security/licenses/httpclient-cache-4.5.8.jar.sha1 b/x-pack/plugin/security/licenses/httpclient-cache-4.5.8.jar.sha1 deleted file mode 100644 index 87db7aba09e..00000000000 --- a/x-pack/plugin/security/licenses/httpclient-cache-4.5.8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bb984b73da2153285b660f3e278498abd94ccbb5 \ No newline at end of file diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java index 781feadabdc..b74e03bacc6 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java @@ -337,15 +337,16 @@ public class ApiKeyService { } if (credentials != null) { + final String docId = credentials.getId(); final GetRequest getRequest = client - .prepareGet(SECURITY_MAIN_ALIAS, SINGLE_MAPPING_NAME, credentials.getId()) + .prepareGet(SECURITY_MAIN_ALIAS, SINGLE_MAPPING_NAME, docId) .setFetchSource(true) .request(); executeAsyncWithOrigin(ctx, SECURITY_ORIGIN, getRequest, ActionListener.wrap(response -> { if (response.isExists()) { try (ApiKeyCredentials ignore = credentials) { final Map source = response.getSource(); - validateApiKeyCredentials(source, credentials, clock, listener); + validateApiKeyCredentials(docId, source, credentials, clock, listener); } } else { credentials.close(); @@ -440,17 +441,22 @@ public class ApiKeyService { /** * Validates the ApiKey using the source map + * @param docId the identifier of the document that was retrieved from the security index * @param source the source map from a get of the ApiKey document * @param credentials the credentials provided by the user * @param listener the listener to notify after verification */ - void validateApiKeyCredentials(Map source, ApiKeyCredentials credentials, Clock clock, + void validateApiKeyCredentials(String docId, Map source, ApiKeyCredentials credentials, Clock clock, ActionListener listener) { + final String docType = (String) source.get("doc_type"); final Boolean invalidated = (Boolean) source.get("api_key_invalidated"); - if (invalidated == null) { - listener.onResponse(AuthenticationResult.terminate("api key document is missing invalidated field", null)); + if ("api_key".equals(docType) == false) { + listener.onResponse( + AuthenticationResult.unsuccessful("document [" + docId + "] is [" + docType + "] not an api key", null)); + } else if (invalidated == null) { + listener.onResponse(AuthenticationResult.unsuccessful("api key document is missing invalidated field", null)); } else if (invalidated) { - listener.onResponse(AuthenticationResult.terminate("api key has been invalidated", null)); + listener.onResponse(AuthenticationResult.unsuccessful("api key has been invalidated", null)); } else { final String apiKeyHash = (String) source.get("api_key_hash"); if (apiKeyHash == null) { @@ -484,7 +490,7 @@ public class ApiKeyService { listener.onResponse(AuthenticationResult.unsuccessful("invalid credentials", 
null)); } else { apiKeyAuthCache.invalidate(credentials.getId(), listenableCacheEntry); - validateApiKeyCredentials(source, credentials, clock, listener); + validateApiKeyCredentials(docId, source, credentials, clock, listener); } }, listener::onFailure), threadPool.generic(), threadPool.getThreadContext()); @@ -534,7 +540,7 @@ public class ApiKeyService { authResultMetadata.put(API_KEY_ID_KEY, credentials.getId()); listener.onResponse(AuthenticationResult.success(apiKeyUser, authResultMetadata)); } else { - listener.onResponse(AuthenticationResult.terminate("api key is expired", null)); + listener.onResponse(AuthenticationResult.unsuccessful("api key is expired", null)); } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/Realms.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/Realms.java index 6961613fe31..c783842ba6f 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/Realms.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/Realms.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.security.authc; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.CountDown; @@ -184,6 +185,7 @@ public class Realms implements Iterable { Set internalTypes = new HashSet<>(); List realms = new ArrayList<>(); List kerberosRealmNames = new ArrayList<>(); + Map> nameToRealmIdentifier = new HashMap<>(); for (RealmConfig.RealmIdentifier identifier: realmsSettings.keySet()) { Realm.Factory factory = factories.get(identifier.getType()); if (factory == null) { @@ -213,7 +215,10 @@ public class Realms implements Iterable { "configured"); } } - realms.add(factory.create(config)); + Realm realm = factory.create(config); + nameToRealmIdentifier.computeIfAbsent(realm.name(), k -> + new HashSet<>()).add(RealmSettings.realmSettingPrefix(realm.type()) + realm.name()); + realms.add(realm); } if (!realms.isEmpty()) { @@ -224,6 +229,13 @@ public class Realms implements Iterable { } // always add built in first! 
realms.add(0, reservedRealm); + String duplicateRealms = nameToRealmIdentifier.entrySet().stream() + .filter(entry -> entry.getValue().size() > 1) + .map(entry -> entry.getKey() + ": " + entry.getValue()) + .collect(Collectors.joining("; ")); + if (Strings.hasText(duplicateRealms)) { + throw new IllegalArgumentException("Found multiple realms configured with the same name: " + duplicateRealms + ""); + } return realms; } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationUtils.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationUtils.java index 308fbedb0f2..2097c5176d1 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationUtils.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationUtils.java @@ -19,7 +19,7 @@ import java.util.function.Consumer; import java.util.function.Predicate; import static org.elasticsearch.action.admin.cluster.node.tasks.get.GetTaskAction.TASKS_ORIGIN; -import static org.elasticsearch.xpack.core.ClientHelper.DATA_FRAME_ORIGIN; +import static org.elasticsearch.xpack.core.ClientHelper.TRANSFORM_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.DEPRECATION_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.INDEX_LIFECYCLE_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; @@ -106,7 +106,7 @@ public final class AuthorizationUtils { case WATCHER_ORIGIN: case ML_ORIGIN: case MONITORING_ORIGIN: - case DATA_FRAME_ORIGIN: + case TRANSFORM_ORIGIN: case DEPRECATION_ORIGIN: case PERSISTENT_TASK_ORIGIN: case ROLLUP_ORIGIN: diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/NativeRealmIntegTestCase.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/NativeRealmIntegTestCase.java index 671a94452fa..116c0a18c47 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/NativeRealmIntegTestCase.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/NativeRealmIntegTestCase.java @@ -9,7 +9,9 @@ import org.elasticsearch.client.Request; import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.RestClient; import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.transport.netty4.Netty4Transport; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; import org.elasticsearch.xpack.core.security.client.SecurityClient; import org.elasticsearch.xpack.core.security.user.APMSystemUser; @@ -63,6 +65,16 @@ public abstract class NativeRealmIntegTestCase extends SecurityIntegTestCase { return templates; } + @Override + protected Settings nodeSettings(int nodeOrdinal) { + Settings.Builder builder = Settings.builder().put(super.nodeSettings(nodeOrdinal)); + // we are randomly running a large number of nodes in these tests so we limit the number of worker threads + // since the default of 2 * CPU count might use up too much direct memory for thread-local direct buffers for each node's + // transport threads + builder.put(Netty4Transport.WORKER_COUNT.getKey(), random().nextInt(3) + 1); + return builder.build(); + } + private SecureString reservedPassword = SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING; protected SecureString getReservedPassword() { diff --git 
a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/oidc/TransportOpenIdConnectLogoutActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/oidc/TransportOpenIdConnectLogoutActionTests.java index 6278221a362..c18bcb10996 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/oidc/TransportOpenIdConnectLogoutActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/oidc/TransportOpenIdConnectLogoutActionTests.java @@ -31,6 +31,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.env.Environment; import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ClusterServiceUtils; @@ -143,7 +144,7 @@ public class TransportOpenIdConnectLogoutActionTests extends OpenIdConnectTestCa ActionListener listener = (ActionListener) invocationOnMock.getArguments()[2]; indexRequests.add(indexRequest); final IndexResponse response = new IndexResponse( - indexRequest.shardId(), indexRequest.type(), indexRequest.id(), 1, 1, 1, true); + new ShardId("test", "test", 0), indexRequest.type(), indexRequest.id(), 1, 1, 1, true); listener.onResponse(response); return Void.TYPE; }).when(client).execute(eq(IndexAction.INSTANCE), any(IndexRequest.class), any(ActionListener.class)); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java index c5ed4365eff..f0337a7a72b 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java @@ -46,6 +46,7 @@ import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.TermQueryBuilder; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; @@ -149,7 +150,7 @@ public class TransportSamlInvalidateSessionActionTests extends SamlTestCase { IndexRequest indexRequest = (IndexRequest) request; indexRequests.add(indexRequest); final IndexResponse response = new IndexResponse( - indexRequest.shardId(), indexRequest.type(), indexRequest.id(), 1, 1, 1, true); + new ShardId("test", "test", 0), indexRequest.type(), indexRequest.id(), 1, 1, 1, true); listener.onResponse((Response) response); } else if (BulkAction.NAME.equals(action.name())) { assertThat(request, instanceOf(BulkRequest.class)); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutActionTests.java index 9b9dc79a29c..488f36ea4f3 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutActionTests.java +++ 
b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutActionTests.java @@ -38,6 +38,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.env.Environment; import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ClusterServiceUtils; @@ -170,7 +171,7 @@ public class TransportSamlLogoutActionTests extends SamlTestCase { ActionListener listener = (ActionListener) invocationOnMock.getArguments()[1]; indexRequests.add(indexRequest); final IndexResponse response = new IndexResponse( - indexRequest.shardId(), indexRequest.type(), indexRequest.id(), 1, 1, 1, true); + new ShardId("test", "test", 0), indexRequest.type(), indexRequest.id(), 1, 1, 1, true); listener.onResponse(response); return Void.TYPE; }).when(client).index(any(IndexRequest.class), any(ActionListener.class)); @@ -179,7 +180,7 @@ public class TransportSamlLogoutActionTests extends SamlTestCase { ActionListener listener = (ActionListener) invocationOnMock.getArguments()[2]; indexRequests.add(indexRequest); final IndexResponse response = new IndexResponse( - indexRequest.shardId(), indexRequest.type(), indexRequest.id(), 1, 1, 1, true); + new ShardId("test", "test", 0), indexRequest.type(), indexRequest.id(), 1, 1, 1, true); listener.onResponse(response); return Void.TYPE; }).when(client).execute(eq(IndexAction.INSTANCE), any(IndexRequest.class), any(ActionListener.class)); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java index 031f5ccec06..88cbc0a8069 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java @@ -45,6 +45,7 @@ import org.junit.Before; import java.io.IOException; import java.nio.charset.StandardCharsets; import java.time.Clock; +import java.time.Duration; import java.time.Instant; import java.time.temporal.ChronoUnit; import java.util.Arrays; @@ -56,6 +57,7 @@ import java.util.Map; import static org.elasticsearch.xpack.core.security.authz.store.ReservedRolesStore.SUPERUSER_ROLE_DESCRIPTOR; import static org.hamcrest.Matchers.arrayContaining; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; @@ -159,25 +161,110 @@ public class ApiKeyServiceTests extends ESTestCase { assertThat(auth.getUser(), nullValue()); } - public void mockKeyDocument(ApiKeyService service, String id, String key, User user) throws IOException { - final Authentication authentication = new Authentication(user, new RealmRef("realm1", "native", "node01"), null, Version.CURRENT); - final XContentBuilder docSource = service.newDocument(new SecureString(key.toCharArray()), "test", authentication, - Collections.singleton(SUPERUSER_ROLE_DESCRIPTOR), Instant.now(), Instant.now().plusSeconds(3600), null, Version.CURRENT); + public void testAuthenticationFailureWithInvalidatedApiKey() throws Exception { + final Settings settings = Settings.builder().put(XPackSettings.API_KEY_SERVICE_ENABLED_SETTING.getKey(), 
true).build(); + final ApiKeyService service = createApiKeyService(settings); + final String id = randomAlphaOfLength(12); + final String key = randomAlphaOfLength(16); + + mockKeyDocument(service, id, key, new User("hulk", "superuser"), true, Duration.ofSeconds(3600)); + + final AuthenticationResult auth = tryAuthenticate(service, id, key); + assertThat(auth.getStatus(), is(AuthenticationResult.Status.CONTINUE)); + assertThat(auth.getUser(), nullValue()); + assertThat(auth.getMessage(), containsString("invalidated")); + } + + public void testAuthenticationFailureWithInvalidCredentials() throws Exception { + final Settings settings = Settings.builder().put(XPackSettings.API_KEY_SERVICE_ENABLED_SETTING.getKey(), true).build(); + final ApiKeyService service = createApiKeyService(settings); + + final String id = randomAlphaOfLength(12); + final String realKey = randomAlphaOfLength(16); + final String wrongKey = "#" + realKey.substring(1); + + mockKeyDocument(service, id, realKey, new User("hulk", "superuser")); + + final AuthenticationResult auth = tryAuthenticate(service, id, wrongKey); + assertThat(auth.getStatus(), is(AuthenticationResult.Status.CONTINUE)); + assertThat(auth.getUser(), nullValue()); + assertThat(auth.getMessage(), containsString("invalid credentials")); + } + + public void testAuthenticationFailureWithExpiredKey() throws Exception { + final Settings settings = Settings.builder().put(XPackSettings.API_KEY_SERVICE_ENABLED_SETTING.getKey(), true).build(); + final ApiKeyService service = createApiKeyService(settings); + + final String id = randomAlphaOfLength(12); + final String key = randomAlphaOfLength(16); + + mockKeyDocument(service, id, key, new User("hulk", "superuser"), false, Duration.ofSeconds(-1)); + + final AuthenticationResult auth = tryAuthenticate(service, id, key); + assertThat(auth.getStatus(), is(AuthenticationResult.Status.CONTINUE)); + assertThat(auth.getUser(), nullValue()); + assertThat(auth.getMessage(), containsString("expired")); + } + + /** + * We cache valid and invalid responses. This test verifies that we handle these correctly. 
+ */ + public void testMixingValidAndInvalidCredentials() throws Exception { + final Settings settings = Settings.builder().put(XPackSettings.API_KEY_SERVICE_ENABLED_SETTING.getKey(), true).build(); + final ApiKeyService service = createApiKeyService(settings); + + final String id = randomAlphaOfLength(12); + final String realKey = randomAlphaOfLength(16); + + mockKeyDocument(service, id, realKey, new User("hulk", "superuser")); + + for (int i = 0; i < 3; i++) { + final String wrongKey = "=" + randomAlphaOfLength(14) + "@"; + AuthenticationResult auth = tryAuthenticate(service, id, wrongKey); + assertThat(auth.getStatus(), is(AuthenticationResult.Status.CONTINUE)); + assertThat(auth.getUser(), nullValue()); + assertThat(auth.getMessage(), containsString("invalid credentials")); + + auth = tryAuthenticate(service, id, realKey); + assertThat(auth.getStatus(), is(AuthenticationResult.Status.SUCCESS)); + assertThat(auth.getUser(), notNullValue()); + assertThat(auth.getUser().principal(), is("hulk")); + } + } + + private void mockKeyDocument(ApiKeyService service, String id, String key, User user) throws IOException { + mockKeyDocument(service, id, key, user, false, Duration.ofSeconds(3600)); + } + + private void mockKeyDocument(ApiKeyService service, String id, String key, User user, boolean invalidated, + Duration expiry) throws IOException { + final Authentication authentication = new Authentication(user, new RealmRef("realm1", "native", + "node01"), null, Version.CURRENT); + XContentBuilder docSource = service.newDocument(new SecureString(key.toCharArray()), "test", authentication, + Collections.singleton(SUPERUSER_ROLE_DESCRIPTOR), Instant.now(), Instant.now().plus(expiry), null, + Version.CURRENT); + if (invalidated) { + Map map = XContentHelper.convertToMap(BytesReference.bytes(docSource), true, XContentType.JSON).v2(); + map.put("api_key_invalidated", true); + docSource = XContentBuilder.builder(XContentType.JSON.xContent()).map(map); + } SecurityMocks.mockGetRequest(client, id, BytesReference.bytes(docSource)); } private AuthenticationResult tryAuthenticate(ApiKeyService service, String id, String key) throws Exception { final ThreadContext threadContext = threadPool.getThreadContext(); - final String header = "ApiKey " + Base64.getEncoder().encodeToString((id + ":" + key).getBytes(StandardCharsets.UTF_8)); - threadContext.putHeader("Authorization", header); + try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { + final String header = "ApiKey " + Base64.getEncoder().encodeToString((id + ":" + key).getBytes(StandardCharsets.UTF_8)); + threadContext.putHeader("Authorization", header); - final PlainActionFuture future = new PlainActionFuture<>(); - service.authenticateWithApiKeyIfPresent(threadContext, future); + final PlainActionFuture future = new PlainActionFuture<>(); + service.authenticateWithApiKeyIfPresent(threadContext, future); - final AuthenticationResult auth = future.get(); - assertThat(auth, notNullValue()); - return auth; + final AuthenticationResult auth = future.get(); + assertThat(auth, notNullValue()); + return auth; + } } public void testValidateApiKey() throws Exception { @@ -186,6 +273,7 @@ public class ApiKeyServiceTests extends ESTestCase { final char[] hash = hasher.hash(new SecureString(apiKey.toCharArray())); Map sourceMap = new HashMap<>(); + sourceMap.put("doc_type", "api_key"); sourceMap.put("api_key_hash", new String(hash)); sourceMap.put("role_descriptors", Collections.singletonMap("a role", Collections.singletonMap("cluster", "all"))); 
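For context on the doc_type handling exercised here, a minimal source map that gets past the new doc_type and invalidation checks in validateApiKeyCredentials might look like the following sketch (field names taken from these tests; values illustrative only):

    Map<String, Object> sourceMap = new HashMap<>();
    sourceMap.put("doc_type", "api_key");          // anything else is reported as not an api key
    sourceMap.put("api_key_invalidated", false);   // invalidated keys fail authentication
    sourceMap.put("api_key_hash", new String(hasher.hash(new SecureString(apiKey.toCharArray()))));
    sourceMap.put("expiration_time", Clock.systemUTC().instant().plus(1L, ChronoUnit.HOURS).toEpochMilli());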
sourceMap.put("limited_by_role_descriptors", Collections.singletonMap("limited role", Collections.singletonMap("cluster", "all"))); @@ -200,7 +288,7 @@ public class ApiKeyServiceTests extends ESTestCase { ApiKeyService.ApiKeyCredentials creds = new ApiKeyService.ApiKeyCredentials(randomAlphaOfLength(12), new SecureString(apiKey.toCharArray())); PlainActionFuture future = new PlainActionFuture<>(); - service.validateApiKeyCredentials(sourceMap, creds, Clock.systemUTC(), future); + service.validateApiKeyCredentials(creds.getId(), sourceMap, creds, Clock.systemUTC(), future); AuthenticationResult result = future.get(); assertNotNull(result); assertTrue(result.isAuthenticated()); @@ -214,7 +302,7 @@ public class ApiKeyServiceTests extends ESTestCase { sourceMap.put("expiration_time", Clock.systemUTC().instant().plus(1L, ChronoUnit.HOURS).toEpochMilli()); future = new PlainActionFuture<>(); - service.validateApiKeyCredentials(sourceMap, creds, Clock.systemUTC(), future); + service.validateApiKeyCredentials(creds.getId(), sourceMap, creds, Clock.systemUTC(), future); result = future.get(); assertNotNull(result); assertTrue(result.isAuthenticated()); @@ -228,7 +316,7 @@ public class ApiKeyServiceTests extends ESTestCase { sourceMap.put("expiration_time", Clock.systemUTC().instant().minus(1L, ChronoUnit.HOURS).toEpochMilli()); future = new PlainActionFuture<>(); - service.validateApiKeyCredentials(sourceMap, creds, Clock.systemUTC(), future); + service.validateApiKeyCredentials(creds.getId(), sourceMap, creds, Clock.systemUTC(), future); result = future.get(); assertNotNull(result); assertFalse(result.isAuthenticated()); @@ -236,7 +324,7 @@ public class ApiKeyServiceTests extends ESTestCase { sourceMap.remove("expiration_time"); creds = new ApiKeyService.ApiKeyCredentials(randomAlphaOfLength(12), new SecureString(randomAlphaOfLength(15).toCharArray())); future = new PlainActionFuture<>(); - service.validateApiKeyCredentials(sourceMap, creds, Clock.systemUTC(), future); + service.validateApiKeyCredentials(creds.getId(), sourceMap, creds, Clock.systemUTC(), future); result = future.get(); assertNotNull(result); assertFalse(result.isAuthenticated()); @@ -244,7 +332,7 @@ public class ApiKeyServiceTests extends ESTestCase { sourceMap.put("api_key_invalidated", true); creds = new ApiKeyService.ApiKeyCredentials(randomAlphaOfLength(12), new SecureString(randomAlphaOfLength(15).toCharArray())); future = new PlainActionFuture<>(); - service.validateApiKeyCredentials(sourceMap, creds, Clock.systemUTC(), future); + service.validateApiKeyCredentials(creds.getId(), sourceMap, creds, Clock.systemUTC(), future); result = future.get(); assertNotNull(result); assertFalse(result.isAuthenticated()); @@ -344,6 +432,7 @@ public class ApiKeyServiceTests extends ESTestCase { final char[] hash = hasher.hash(new SecureString(apiKey.toCharArray())); Map sourceMap = new HashMap<>(); + sourceMap.put("doc_type", "api_key"); sourceMap.put("api_key_hash", new String(hash)); sourceMap.put("role_descriptors", Collections.singletonMap("a role", Collections.singletonMap("cluster", "all"))); sourceMap.put("limited_by_role_descriptors", Collections.singletonMap("limited role", Collections.singletonMap("cluster", "all"))); @@ -356,7 +445,7 @@ public class ApiKeyServiceTests extends ESTestCase { ApiKeyService service = createApiKeyService(Settings.EMPTY); ApiKeyCredentials creds = new ApiKeyCredentials(randomAlphaOfLength(12), new SecureString(apiKey.toCharArray())); PlainActionFuture future = new PlainActionFuture<>(); - 
service.validateApiKeyCredentials(sourceMap, creds, Clock.systemUTC(), future); + service.validateApiKeyCredentials(creds.getId(), sourceMap, creds, Clock.systemUTC(), future); AuthenticationResult result = future.actionGet(); assertThat(result.isAuthenticated(), is(true)); CachedApiKeyHashResult cachedApiKeyHashResult = service.getFromCache(creds.getId()); @@ -365,7 +454,7 @@ public class ApiKeyServiceTests extends ESTestCase { creds = new ApiKeyCredentials(creds.getId(), new SecureString("foobar".toCharArray())); future = new PlainActionFuture<>(); - service.validateApiKeyCredentials(sourceMap, creds, Clock.systemUTC(), future); + service.validateApiKeyCredentials(creds.getId(), sourceMap, creds, Clock.systemUTC(), future); result = future.actionGet(); assertThat(result.isAuthenticated(), is(false)); final CachedApiKeyHashResult shouldBeSame = service.getFromCache(creds.getId()); @@ -375,7 +464,7 @@ public class ApiKeyServiceTests extends ESTestCase { sourceMap.put("api_key_hash", new String(hasher.hash(new SecureString("foobar".toCharArray())))); creds = new ApiKeyCredentials(randomAlphaOfLength(12), new SecureString("foobar1".toCharArray())); future = new PlainActionFuture<>(); - service.validateApiKeyCredentials(sourceMap, creds, Clock.systemUTC(), future); + service.validateApiKeyCredentials(creds.getId(), sourceMap, creds, Clock.systemUTC(), future); result = future.actionGet(); assertThat(result.isAuthenticated(), is(false)); cachedApiKeyHashResult = service.getFromCache(creds.getId()); @@ -384,7 +473,7 @@ public class ApiKeyServiceTests extends ESTestCase { creds = new ApiKeyCredentials(creds.getId(), new SecureString("foobar2".toCharArray())); future = new PlainActionFuture<>(); - service.validateApiKeyCredentials(sourceMap, creds, Clock.systemUTC(), future); + service.validateApiKeyCredentials(creds.getId(), sourceMap, creds, Clock.systemUTC(), future); result = future.actionGet(); assertThat(result.isAuthenticated(), is(false)); assertThat(service.getFromCache(creds.getId()), not(sameInstance(cachedApiKeyHashResult))); @@ -392,7 +481,7 @@ public class ApiKeyServiceTests extends ESTestCase { creds = new ApiKeyCredentials(creds.getId(), new SecureString("foobar".toCharArray())); future = new PlainActionFuture<>(); - service.validateApiKeyCredentials(sourceMap, creds, Clock.systemUTC(), future); + service.validateApiKeyCredentials(creds.getId(), sourceMap, creds, Clock.systemUTC(), future); result = future.actionGet(); assertThat(result.isAuthenticated(), is(true)); assertThat(service.getFromCache(creds.getId()), not(sameInstance(cachedApiKeyHashResult))); @@ -408,6 +497,7 @@ public class ApiKeyServiceTests extends ESTestCase { .build(); Map sourceMap = new HashMap<>(); + sourceMap.put("doc_type", "api_key"); sourceMap.put("api_key_hash", new String(hash)); sourceMap.put("role_descriptors", Collections.singletonMap("a role", Collections.singletonMap("cluster", "all"))); sourceMap.put("limited_by_role_descriptors", Collections.singletonMap("limited role", Collections.singletonMap("cluster", "all"))); @@ -420,7 +510,7 @@ public class ApiKeyServiceTests extends ESTestCase { ApiKeyService service = createApiKeyService(settings); ApiKeyCredentials creds = new ApiKeyCredentials(randomAlphaOfLength(12), new SecureString(apiKey.toCharArray())); PlainActionFuture future = new PlainActionFuture<>(); - service.validateApiKeyCredentials(sourceMap, creds, Clock.systemUTC(), future); + service.validateApiKeyCredentials(creds.getId(), sourceMap, creds, Clock.systemUTC(), future); 
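Since the same call-and-wait shape repeats throughout this test, a small hypothetical helper (not part of the change) capturing the new docId-first signature could read:

    private AuthenticationResult validate(ApiKeyService service, Map<String, Object> sourceMap,
                                          ApiKeyService.ApiKeyCredentials creds) {
        PlainActionFuture<AuthenticationResult> future = new PlainActionFuture<>();
        // the document id is now passed explicitly so failures can name the offending document
        service.validateApiKeyCredentials(creds.getId(), sourceMap, creds, Clock.systemUTC(), future);
        return future.actionGet();
    }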
AuthenticationResult result = future.actionGet(); assertThat(result.isAuthenticated(), is(true)); CachedApiKeyHashResult cachedApiKeyHashResult = service.getFromCache(creds.getId()); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java index e914e5a416c..955f24d6f26 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java @@ -183,8 +183,11 @@ public class AuthenticationServiceTests extends ESTestCase { when(licenseState.isAuthAllowed()).thenReturn(true); when(licenseState.isApiKeyServiceAllowed()).thenReturn(true); when(licenseState.isTokenServiceAllowed()).thenReturn(true); + ReservedRealm reservedRealm = mock(ReservedRealm.class); + when(reservedRealm.type()).thenReturn("reserved"); + when(reservedRealm.name()).thenReturn("reserved_realm"); realms = spy(new TestRealms(Settings.EMPTY, TestEnvironment.newEnvironment(settings), Collections.emptyMap(), - licenseState, threadContext, mock(ReservedRealm.class), Arrays.asList(firstRealm, secondRealm), + licenseState, threadContext, reservedRealm, Arrays.asList(firstRealm, secondRealm), Collections.singletonList(firstRealm))); auditTrail = mock(AuditTrailService.class); @@ -1305,7 +1308,6 @@ public class AuthenticationServiceTests extends ESTestCase { threadContext.putHeader("Authorization", headerValue); ElasticsearchSecurityException e = expectThrows(ElasticsearchSecurityException.class, () -> authenticateBlocking("_action", message, null)); - assertThat(e.getMessage(), containsString("api key is expired")); assertEquals(RestStatus.UNAUTHORIZED, e.status()); } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmsTests.java index 0cee62879fb..bce2e96bffd 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmsTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmsTests.java @@ -76,6 +76,7 @@ public class RealmsTests extends ESTestCase { when(licenseState.isAuthAllowed()).thenReturn(true); when(licenseState.allowedRealmType()).thenReturn(AllowedRealmType.ALL); when(reservedRealm.type()).thenReturn(ReservedRealm.TYPE); + when(reservedRealm.name()).thenReturn("reserved"); } public void testWithSettings() throws Exception { @@ -170,6 +171,20 @@ public class RealmsTests extends ESTestCase { } } + public void testWithSettingsWithMultipleRealmsWithSameName() throws Exception { + Settings settings = Settings.builder() + .put("xpack.security.authc.realms.file.realm_1.order", 0) + .put("xpack.security.authc.realms.native.realm_1.order", 1) + .put("xpack.security.authc.realms.kerberos.realm_1.order", 2) + .put("path.home", createTempDir()) + .build(); + Environment env = TestEnvironment.newEnvironment(settings); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () ->{ + new Realms(settings, env, factories, licenseState, threadContext, reservedRealm); + }); + assertThat(e.getMessage(), containsString("Found multiple realms configured with the same name")); + } + public void testWithEmptySettings() throws Exception { Realms realms = new Realms(Settings.EMPTY, 
TestEnvironment.newEnvironment(Settings.builder().put("path.home", createTempDir()).build()), factories, licenseState, threadContext, reservedRealm); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/SecurityRealmSettingsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/SecurityRealmSettingsTests.java index b9a557320e3..4566715d6f4 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/SecurityRealmSettingsTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/SecurityRealmSettingsTests.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.security.authc; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.apache.lucene.util.Constants; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.MockSecureSettings; import org.elasticsearch.common.settings.Settings; @@ -127,6 +128,7 @@ public class SecurityRealmSettingsTests extends SecurityIntegTestCase { } public void testClusterStarted() { + assumeFalse("https://github.com/elastic/elasticsearch/issues/44942", Constants.WINDOWS); final AuthenticateRequest request = new AuthenticateRequest(); request.username(nodeClientUsername()); final AuthenticateResponse authenticate = client().execute(AuthenticateAction.INSTANCE, request).actionGet(10, TimeUnit.SECONDS); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapUserSearchSessionFactoryTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapUserSearchSessionFactoryTests.java index 2598b9da550..44498e0ae97 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapUserSearchSessionFactoryTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapUserSearchSessionFactoryTests.java @@ -59,7 +59,7 @@ public class LdapUserSearchSessionFactoryTests extends LdapTestCase { Environment env = TestEnvironment.newEnvironment(Settings.builder().put("path.home", createTempDir()).build()); /* * Prior to each test we reinitialize the socket factory with a new SSLService so that we get a new SSLContext. - * If we re-use a SSLContext, previously connected sessions can get re-established which breaks hostname + * If we re-use an SSLContext, previously connected sessions can get re-established which breaks hostname * verification tests since a re-established connection does not perform hostname verification. 
*/ diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcResultSet.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcResultSet.java index 9661099c55b..6002ef14968 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcResultSet.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcResultSet.java @@ -37,8 +37,8 @@ import static org.elasticsearch.xpack.sql.jdbc.EsType.DATE; import static org.elasticsearch.xpack.sql.jdbc.EsType.DATETIME; import static org.elasticsearch.xpack.sql.jdbc.EsType.TIME; import static org.elasticsearch.xpack.sql.jdbc.JdbcDateUtils.asDateTimeField; -import static org.elasticsearch.xpack.sql.jdbc.JdbcDateUtils.dateTimeAsMillisSinceEpoch; import static org.elasticsearch.xpack.sql.jdbc.JdbcDateUtils.asTimestamp; +import static org.elasticsearch.xpack.sql.jdbc.JdbcDateUtils.dateTimeAsMillisSinceEpoch; import static org.elasticsearch.xpack.sql.jdbc.JdbcDateUtils.timeAsMillisSinceEpoch; import static org.elasticsearch.xpack.sql.jdbc.JdbcDateUtils.timeAsTime; import static org.elasticsearch.xpack.sql.jdbc.JdbcDateUtils.timeAsTimestamp; @@ -57,6 +57,7 @@ class JdbcResultSet implements ResultSet, JdbcWrapper { private boolean closed = false; private boolean wasNull = false; + private boolean wasLast = false; private int rowNumber; @@ -78,10 +79,13 @@ class JdbcResultSet implements ResultSet, JdbcWrapper { if (columnIndex < 1 || columnIndex > cursor.columnSize()) { throw new SQLException("Invalid column index [" + columnIndex + "]"); } + if (wasLast == true || rowNumber < 1) { + throw new SQLException("No row available"); + } Object object = null; try { object = cursor.column(columnIndex - 1); - } catch (IllegalArgumentException iae) { + } catch (Exception iae) { throw new SQLException(iae.getMessage()); } wasNull = (object == null); @@ -114,6 +118,7 @@ class JdbcResultSet implements ResultSet, JdbcWrapper { rowNumber++; return true; } + wasLast = true; return false; } @@ -461,7 +466,7 @@ class JdbcResultSet implements ResultSet, JdbcWrapper { @Override public boolean isAfterLast() throws SQLException { - throw new SQLFeatureNotSupportedException("isAfterLast not supported"); + return rowNumber > 0 && wasLast; } @Override diff --git a/x-pack/plugin/sql/qa/build.gradle b/x-pack/plugin/sql/qa/build.gradle index f33fd4a4303..ee780b4d67c 100644 --- a/x-pack/plugin/sql/qa/build.gradle +++ b/x-pack/plugin/sql/qa/build.gradle @@ -47,7 +47,20 @@ forbiddenApisMain { thirdPartyAudit.enabled = false subprojects { - apply plugin: 'elasticsearch.standalone-rest-test' + if (subprojects.isEmpty()) { + // leaf project + apply plugin: 'elasticsearch.standalone-rest-test' + } else { + apply plugin: 'elasticsearch.build' + } + + configurations.testRuntimeClasspath { + resolutionStrategy.force "org.slf4j:slf4j-api:1.7.25" + } + configurations.testRuntime { + // This is also required to make resolveAllDependencies work + resolutionStrategy.force "org.slf4j:slf4j-api:1.7.25" + } dependencies { /* Since we're a standalone rest test we actually get transitive @@ -65,7 +78,8 @@ subprojects { // H2GIS testing dependencies testRuntime ("org.orbisgis:h2gis:${h2gisVersion}") { - exclude group: "org.locationtech.jts" + exclude group: "org.locationtech.jts" + exclude group: "com.fasterxml.jackson.core" } testRuntime project(path: xpackModule('sql:jdbc'), configuration: 'nodeps') diff --git a/x-pack/plugin/sql/qa/security/build.gradle b/x-pack/plugin/sql/qa/security/build.gradle index 
2774c4b85f4..827559e3026 100644 --- a/x-pack/plugin/sql/qa/security/build.gradle +++ b/x-pack/plugin/sql/qa/security/build.gradle @@ -1,3 +1,4 @@ + dependencies { testCompile project(':x-pack:plugin:core') } @@ -6,27 +7,27 @@ Project mainProject = project group = "${group}.x-pack.qa.sql.security" +configurations.create('testArtifacts') + +TaskProvider testJar = tasks.register("testJar", Jar) { + appendix 'test' + from sourceSets.test.output +} + +artifacts { + testArtifacts testJar +} + // Tests are pushed down to subprojects and will be checked there. testingConventions.enabled = false subprojects { - // Use resources from the parent project in subprojects - sourceSets { - test { - mainProject.sourceSets.test.output.classesDirs.each { dir -> - output.addClassesDir { dir } - output.builtBy(mainProject.tasks.testClasses) - } - runtimeClasspath += mainProject.sourceSets.test.output - } - } - - processTestResources { - from mainProject.file('src/test/resources') - } + // Use tests from the root security qa project in subprojects + configurations.create('testArtifacts') dependencies { testCompile project(":x-pack:plugin:core") + testArtifacts project(path: mainProject.path, configuration: 'testArtifacts') } testClusters.integTest { @@ -42,10 +43,22 @@ subprojects { user username: "test_admin", password: "x-pack-test-password" } + File testArtifactsDir = project.file("$buildDir/testArtifacts") + TaskProvider copyTestClasses = tasks.register("copyTestClasses", Copy) { + dependsOn configurations.testArtifacts + from { zipTree(configurations.testArtifacts.singleFile) } + into testArtifactsDir + } + integTest.runner { + dependsOn copyTestClasses + testClassesDirs += project.files(testArtifactsDir) + classpath += configurations.testArtifacts nonInputProperties.systemProperty 'tests.audit.logfile', "${ -> testClusters.integTest.singleNode().getAuditLog()}" nonInputProperties.systemProperty 'tests.audit.yesterday.logfile', "${ -> testClusters.integTest.singleNode().getAuditLog().getParentFile()}/integTest_audit-${new Date().format('yyyy-MM-dd')}.json" } + + testingConventions.enabled = false } diff --git a/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/SqlSecurityTestCase.java b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/SqlSecurityTestCase.java index aaf028181a1..7340a1ab933 100644 --- a/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/SqlSecurityTestCase.java +++ b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/SqlSecurityTestCase.java @@ -641,7 +641,9 @@ public abstract class SqlSecurityTestCase extends ESRestTestCase { assertThat(log.containsKey("user.name"), is(true)); List indices = new ArrayList<>(); if (log.containsKey("indices")) { - indices = (ArrayList) log.get("indices"); + @SuppressWarnings("unchecked") + List castIndices = (ArrayList) log.get("indices"); + indices = castIndices; if ("test_admin".equals(log.get("user.name"))) { /* * Sometimes we accidentally sneak access to the security tables. 
This is fine, diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/DataLoader.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/DataLoader.java index ff50a33a0af..d2dffe21e10 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/DataLoader.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/DataLoader.java @@ -43,6 +43,27 @@ public class DataLoader { loadEmpDatasetIntoEs(client); } + public static void createEmptyIndex(RestClient client, String index) throws Exception { + Request request = new Request("PUT", "/" + index); + XContentBuilder createIndex = JsonXContent.contentBuilder().startObject(); + createIndex.startObject("settings"); + { + createIndex.field("number_of_shards", 1); + createIndex.field("number_of_replicas", 1); + } + createIndex.endObject(); + createIndex.startObject("mappings"); + { + createIndex.startObject("properties"); + { + } + createIndex.endObject(); + } + createIndex.endObject().endObject(); + request.setJsonEntity(Strings.toString(createIndex)); + client.performRequest(request); + } + protected static void loadEmpDatasetIntoEs(RestClient client) throws Exception { loadEmpDatasetIntoEs(client, "test_emp", "employees"); loadEmpDatasetWithExtraIntoEs(client, "test_emp_copy", "employees"); diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/DatabaseMetaDataTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/DatabaseMetaDataTestCase.java index 08f56b00582..ddd768a639d 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/DatabaseMetaDataTestCase.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/DatabaseMetaDataTestCase.java @@ -71,6 +71,24 @@ public class DatabaseMetaDataTestCase extends JdbcIntegrationTestCase { } } + public void testGetTablesForEmptyIndices() throws Exception { + DataLoader.createEmptyIndex(client(), "test_empty"); + DataLoader.createEmptyIndex(client(), "test_empty_again"); + + try (Connection h2 = LocalH2.anonymousDb(); Connection es = esJdbc()) { + h2.createStatement().executeUpdate("RUNSCRIPT FROM 'classpath:/setup_mock_metadata_get_tables_empty.sql'"); + + CheckedSupplier all = () -> h2.createStatement() + .executeQuery("SELECT '" + clusterName() + "' AS TABLE_CAT, * FROM mock"); + assertResultSets(all.get(), es.getMetaData().getTables("%", "%", "%", null)); + assertResultSets(all.get(), es.getMetaData().getTables("%", "%", "te%", null)); + assertResultSets( + h2.createStatement() + .executeQuery("SELECT '" + clusterName() + "' AS TABLE_CAT, * FROM mock WHERE TABLE_NAME = 'test_empty'"), + es.getMetaData().getTables("%", "%", "test_empty", null)); + } + } + public void testGetTypeOfTables() throws Exception { index("test1", body -> body.field("name", "bob")); index("test2", body -> body.field("name", "bob")); @@ -121,4 +139,13 @@ public class DatabaseMetaDataTestCase extends JdbcIntegrationTestCase { assertResultSets(expected, es.getMetaData().getColumns(null, "%", "%", null)); } } + + public void testColumnsForEmptyTable() throws Exception { + try (Connection h2 = LocalH2.anonymousDb(); Connection es = esJdbc()) { + h2.createStatement().executeUpdate("RUNSCRIPT FROM 'classpath:/setup_mock_metadata_get_columns_empty.sql'"); + + ResultSet expected = h2.createStatement().executeQuery("SELECT '" + clusterName() + "' AS TABLE_CAT, * FROM mock"); + assertResultSets(expected, 
es.getMetaData().getColumns(null, "%", "%", null)); + } + } } \ No newline at end of file diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcAssert.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcAssert.java index ec9386d2d6e..fcdb3c65330 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcAssert.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcAssert.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.sql.qa.jdbc; import com.carrotsearch.hppc.IntObjectHashMap; + import org.apache.logging.log4j.Logger; import org.elasticsearch.geometry.Geometry; import org.elasticsearch.geometry.Point; @@ -17,6 +18,7 @@ import org.elasticsearch.xpack.sql.proto.StringUtils; import org.relique.jdbc.csv.CsvResultSet; import java.io.IOException; +import java.sql.Date; import java.sql.ResultSet; import java.sql.ResultSetMetaData; import java.sql.SQLException; @@ -37,6 +39,9 @@ import static java.sql.Types.INTEGER; import static java.sql.Types.REAL; import static java.sql.Types.SMALLINT; import static java.sql.Types.TINYINT; +import static java.time.ZoneOffset.UTC; +import static org.elasticsearch.xpack.sql.qa.jdbc.JdbcTestUtils.logResultSetMetadata; +import static org.elasticsearch.xpack.sql.qa.jdbc.JdbcTestUtils.resultSetCurrentData; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.instanceOf; import static org.junit.Assert.assertEquals; @@ -107,7 +112,7 @@ public class JdbcAssert { ResultSetMetaData actualMeta = actual.getMetaData(); if (logger != null) { - JdbcTestUtils.logResultSetMetadata(actual, logger); + logResultSetMetadata(actual, logger); } if (expectedMeta.getColumnCount() != actualMeta.getColumnCount()) { @@ -210,7 +215,7 @@ public class JdbcAssert { assertTrue("Expected more data but no more entries found after [" + count + "]", actual.next()); if (logger != null) { - logger.info(JdbcTestUtils.resultSetCurrentData(actual)); + logger.info(resultSetCurrentData(actual)); } for (int column = 1; column <= columns; column++) { @@ -220,7 +225,7 @@ public class JdbcAssert { String columnClassName = metaData.getColumnClassName(column); // fix for CSV which returns the shortName not fully-qualified name - if (!columnClassName.contains(".")) { + if (columnClassName != null && !columnClassName.contains(".")) { switch (columnClassName) { case "Date": columnClassName = "java.sql.Date"; @@ -240,13 +245,17 @@ public class JdbcAssert { } } - expectedColumnClass = Class.forName(columnClassName); + if (columnClassName != null) { + expectedColumnClass = Class.forName(columnClassName); + } } catch (ClassNotFoundException cnfe) { throw new SQLException(cnfe); } Object expectedObject = expected.getObject(column); - Object actualObject = lenientDataType ? actual.getObject(column, expectedColumnClass) : actual.getObject(column); + Object actualObject = (lenientDataType && expectedColumnClass != null) + ? 
actual.getObject(column, expectedColumnClass) + : actual.getObject(column); String msg = format(Locale.ROOT, "Different result for column [%s], entry [%d]", metaData.getColumnName(column), count + 1); @@ -264,6 +273,10 @@ public class JdbcAssert { else if (type == Types.TIMESTAMP || type == Types.TIMESTAMP_WITH_TIMEZONE) { assertEquals(msg, expected.getTimestamp(column), actual.getTimestamp(column)); } + // then date + else if (type == Types.DATE) { + assertEquals(msg, convertDateToSystemTimezone(expected.getDate(column)), actual.getDate(column)); + } // and floats/doubles else if (type == Types.DOUBLE) { assertEquals(msg, (double) expectedObject, (double) actualObject, lenientFloatingNumbers ? 1d : 0.0d); @@ -301,14 +314,14 @@ public class JdbcAssert { } catch (AssertionError ae) { if (logger != null && actual.next()) { logger.info("^^^ Assertion failure ^^^"); - logger.info(JdbcTestUtils.resultSetCurrentData(actual)); + logger.info(resultSetCurrentData(actual)); } throw ae; } if (actual.next()) { fail("Elasticsearch [" + actual + "] still has data after [" + count + "] entries:\n" - + JdbcTestUtils.resultSetCurrentData(actual)); + + resultSetCurrentData(actual)); } } @@ -328,4 +341,9 @@ public class JdbcAssert { return columnType; } + + // Used to convert the DATE read from CSV file to a java.sql.Date at the System's timezone (-Dtests.timezone=XXXX) + private static Date convertDateToSystemTimezone(Date date) { + return new Date(date.toLocalDate().atStartOfDay(UTC).toInstant().toEpochMilli()); + } } diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcTestUtils.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcTestUtils.java index 123f22073ae..8bfbcd370ec 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcTestUtils.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcTestUtils.java @@ -194,7 +194,7 @@ final class JdbcTestUtils { } } // normal file access - else { + else if (Files.isDirectory(path)) { Files.walkFileTree(path, EnumSet.allOf(FileVisitOption.class), 1, new SimpleFileVisitor() { @Override public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/ResultSetTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/ResultSetTestCase.java index f938e719e8d..b842193efc9 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/ResultSetTestCase.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/ResultSetTestCase.java @@ -34,6 +34,7 @@ import java.sql.Timestamp; import java.sql.Types; import java.time.Instant; import java.time.ZoneId; +import java.time.ZonedDateTime; import java.util.Arrays; import java.util.Calendar; import java.util.Date; @@ -1157,6 +1158,33 @@ public class ResultSetTestCase extends JdbcIntegrationTestCase { assertEquals(expectedTimestamp, results.getObject("date", java.sql.Timestamp.class)); }); } + + public void testGetDateTypeFromAggregation() throws Exception { + createIndex("test"); + updateMapping("test", builder -> builder.startObject("test_date").field("type", "date").endObject()); + + // 1984-05-02 14:59:12 UTC + Long timeInMillis = 452357952000L; + index("test", "1", builder -> builder.field("test_date", timeInMillis)); + + doWithQueryAndTimezone("SELECT CONVERT(test_date, DATE) AS converted FROM test GROUP BY 
converted", "UTC", results -> { + results.next(); + ZonedDateTime zdt = ZonedDateTime.ofInstant(Instant.ofEpochMilli(timeInMillis), ZoneId.of("Z")) + .toLocalDate().atStartOfDay(ZoneId.of("Z")); + + java.sql.Date expectedDate = new java.sql.Date(zdt.toInstant().toEpochMilli()); + assertEquals(expectedDate, results.getDate("converted")); + assertEquals(expectedDate, results.getObject("converted", java.sql.Date.class)); + + java.sql.Time expectedTime = new java.sql.Time(0L); + assertEquals(expectedTime, results.getTime("converted")); + assertEquals(expectedTime, results.getObject("converted", java.sql.Time.class)); + + java.sql.Timestamp expectedTimestamp = new java.sql.Timestamp(zdt.toInstant().toEpochMilli()); + assertEquals(expectedTimestamp, results.getTimestamp("converted")); + assertEquals(expectedTimestamp, results.getObject("converted", java.sql.Timestamp.class)); + }); + } public void testGetTimeType() throws Exception { createIndex("test"); @@ -1421,6 +1449,34 @@ public class ResultSetTestCase extends JdbcIntegrationTestCase { assertThrowsWritesUnsupportedForUpdate(() -> r.rowDeleted()); } + public void testResultSetNotInitialized() throws Exception { + createTestDataForNumericValueTypes(() -> randomInt()); + + SQLException sqle = expectThrows(SQLException.class, () -> { + doWithQuery(SELECT_WILDCARD, rs -> { + assertFalse(rs.isAfterLast()); + rs.getObject(1); + }); + }); + assertEquals("No row available", sqle.getMessage()); + } + + public void testResultSetConsumed() throws Exception { + createTestDataForNumericValueTypes(() -> randomInt()); + + SQLException sqle = expectThrows(SQLException.class, () -> { + doWithQuery("SELECT * FROM test LIMIT 1", rs -> { + assertFalse(rs.isAfterLast()); + assertTrue(rs.next()); + assertFalse(rs.isAfterLast()); + assertFalse(rs.next()); + assertTrue(rs.isAfterLast()); + rs.getObject(1); + }); + }); + assertEquals("No row available", sqle.getMessage()); + } + private void doWithQuery(String query, CheckedConsumer consumer) throws SQLException { doWithQuery(() -> esJdbc(timeZoneId), query, consumer); } diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/ShowTablesTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/ShowTablesTestCase.java index 4a6882685a9..af5446176bf 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/ShowTablesTestCase.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/ShowTablesTestCase.java @@ -37,4 +37,18 @@ public class ShowTablesTestCase extends JdbcIntegrationTestCase { assertResultSets(expected, es.createStatement().executeQuery("SHOW TABLES")); } } + + public void testEmptyIndex() throws Exception { + DataLoader.createEmptyIndex(client(), "test_empty"); + DataLoader.createEmptyIndex(client(), "test_empty_again"); + + try (Connection h2 = LocalH2.anonymousDb(); Connection es = esJdbc()) { + h2.createStatement().executeUpdate("RUNSCRIPT FROM 'classpath:/setup_mock_show_tables.sql'"); + h2.createStatement().executeUpdate("INSERT INTO mock VALUES ('test_empty', 'BASE TABLE', 'INDEX');"); + h2.createStatement().executeUpdate("INSERT INTO mock VALUES ('test_empty_again', 'BASE TABLE', 'INDEX');"); + + ResultSet expected = h2.createStatement().executeQuery("SELECT * FROM mock"); + assertResultSets(expected, es.createStatement().executeQuery("SHOW TABLES")); + } + } } diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/SpecBaseIntegrationTestCase.java 
b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/SpecBaseIntegrationTestCase.java index 05ba49bbd0d..dd311b299fc 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/SpecBaseIntegrationTestCase.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/SpecBaseIntegrationTestCase.java @@ -66,7 +66,7 @@ public abstract class SpecBaseIntegrationTestCase extends JdbcIntegrationTestCas } protected void loadDataset(RestClient client) throws Exception { - DataLoader.loadEmpDatasetIntoEs(client); + DataLoader.loadDatasetIntoEs(client); } @Override diff --git a/x-pack/plugin/sql/qa/src/main/resources/agg.sql-spec b/x-pack/plugin/sql/qa/src/main/resources/agg.sql-spec index e24297f7fa9..8ebdcf88e85 100644 --- a/x-pack/plugin/sql/qa/src/main/resources/agg.sql-spec +++ b/x-pack/plugin/sql/qa/src/main/resources/agg.sql-spec @@ -423,6 +423,31 @@ SELECT gender g, CAST(AVG(emp_no) AS FLOAT) a FROM "test_emp" GROUP BY g HAVING aggAvgWithMultipleHavingOnAliasAndFunction SELECT gender g, CAST(AVG(emp_no) AS FLOAT) a FROM "test_emp" GROUP BY g HAVING a > 10 AND AVG(emp_no) > 10000000 ORDER BY g ; +// Implicit grouping with filtering +implicitGroupingWithLiteralAndFiltering +SELECT 1 FROM test_emp HAVING COUNT(*) > 0; +implicitGroupingWithLiteralAliasAndFiltering +SELECT 1 AS l FROM test_emp HAVING COUNT(*) > 0; +implicitGroupingWithLiteralAndFilteringOnAlias +SELECT 1, COUNT(*) AS c FROM test_emp HAVING c > 0; +implicitGroupingWithLiteralAliasAndFilteringOnAlias +SELECT 1 AS l FROM test_emp HAVING COUNT(*) > 0; +implicitGroupingWithAggs +SELECT MAX(emp_no) AS m FROM test_emp HAVING COUNT(*) > 0; +implicitGroupingWithOptimizedAggs +SELECT MIN(emp_no) AS m FROM test_emp HAVING MAX(emp_no) > 0 AND COUNT(*) > 0; +implicitGroupingWithNull +SELECT NULL AS x FROM test_emp HAVING COUNT(1) > 1; +implicitGroupingWithNullFunction +SELECT LTRIM(CAST(YEAR(CAST(NULL AS DATE)) AS VARCHAR)) AS x FROM test_emp HAVING COUNT(1) > 1; +implicitGroupingWithNullDateTimeFunction +SELECT DAYNAME(CAST(NULL AS TIMESTAMP)) AS x FROM test_emp HAVING COUNT(1) > 1; +implicitGroupingWithScalarInsideCase +SELECT (CASE WHEN 'D' IS NULL THEN NULL WHEN 'D' IS NOT NULL THEN (LOCATE('D', 'Data') = 1) END) AS x FROM test_emp HAVING (COUNT(1) > 0); +implicitGroupingWithMultiLevelCase +SELECT (CASE WHEN ('Data' IS NULL) OR ('Xyz' IS NULL) THEN NULL WHEN 'Data' < 'Xyz' THEN 'Data' ELSE 'Xyz' END) AS x FROM test_emp HAVING (COUNT(1) > 0); + + // // GroupBy on Scalar plus Having // diff --git a/x-pack/plugin/sql/qa/src/main/resources/command.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/command.csv-spec index 073788511d0..abb1175cb2f 100644 --- a/x-pack/plugin/sql/qa/src/main/resources/command.csv-spec +++ b/x-pack/plugin/sql/qa/src/main/resources/command.csv-spec @@ -39,16 +39,17 @@ CURRENT_DATE |SCALAR CURRENT_TIME |SCALAR CURRENT_TIMESTAMP|SCALAR CURTIME |SCALAR +DATE_TRUNC |SCALAR DAY |SCALAR DAYNAME |SCALAR -DAYOFMONTH |SCALAR -DAYOFWEEK |SCALAR -DAYOFYEAR |SCALAR -DAY_NAME |SCALAR -DAY_OF_MONTH |SCALAR -DAY_OF_WEEK |SCALAR -DAY_OF_YEAR |SCALAR -DOM |SCALAR +DAYOFMONTH |SCALAR +DAYOFWEEK |SCALAR +DAYOFYEAR |SCALAR +DAY_NAME |SCALAR +DAY_OF_MONTH |SCALAR +DAY_OF_WEEK |SCALAR +DAY_OF_YEAR |SCALAR +DOM |SCALAR DOW |SCALAR DOY |SCALAR HOUR |SCALAR diff --git a/x-pack/plugin/sql/qa/src/main/resources/datetime.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/datetime.csv-spec index 367b5d0ddfd..ad80d663b47 100644 --- 
a/x-pack/plugin/sql/qa/src/main/resources/datetime.csv-spec +++ b/x-pack/plugin/sql/qa/src/main/resources/datetime.csv-spec @@ -121,6 +121,151 @@ SELECT WEEK(birth_date) week, birth_date FROM test_emp WHERE WEEK(birth_date) > 2 |1953-01-07T00:00:00.000Z ; +selectDateTruncWithDateTime +schema::dt_hour:ts|dt_min:ts|dt_sec:ts|dt_millis:s|dt_micro:s|dt_nano:s +SELECT DATE_TRUNC('hour', '2019-09-04T11:22:33.123Z'::datetime) as dt_hour, DATE_TRUNC('minute', '2019-09-04T11:22:33.123Z'::datetime) as dt_min, +DATE_TRUNC('seconds', '2019-09-04T11:22:33.123Z'::datetime) as dt_sec, DATE_TRUNC('ms', '2019-09-04T11:22:33.123Z'::datetime)::string as dt_millis, +DATE_TRUNC('mcs', '2019-09-04T11:22:33.123Z'::datetime)::string as dt_micro, DATE_TRUNC('nanoseconds', '2019-09-04T11:22:33.123Z'::datetime)::string as dt_nano; + + dt_hour | dt_min | dt_sec | dt_millis | dt_micro | dt_nano +-------------------------+---------------------------+--------------------------+--------------------------+--------------------------+------------------------- +2019-09-04T11:00:00.000Z | 2019-09-04T11:22:00.000Z | 2019-09-04T11:22:33.000Z | 2019-09-04T11:22:33.123Z | 2019-09-04T11:22:33.123Z | 2019-09-04T11:22:33.123Z +; + +selectDateTruncWithDate +schema::dt_mil:ts|dt_cent:ts|dt_dec:ts|dt_year:ts|dt_quarter:ts|dt_month:ts|dt_week:ts|dt_day:ts +SELECT DATE_TRUNC('millennia', '2019-09-04'::date) as dt_mil, DATE_TRUNC('century', '2019-09-04'::date) as dt_cent, +DATE_TRUNC('decades', '2019-09-04'::date) as dt_dec, DATE_TRUNC('year', '2019-09-04'::date) as dt_year, +DATE_TRUNC('quarter', '2019-09-04'::date) as dt_quarter, DATE_TRUNC('month', '2019-09-04'::date) as dt_month, +DATE_TRUNC('week', '2019-09-04'::date) as dt_week, DATE_TRUNC('day', '2019-09-04'::date) as dt_day; + + dt_mil | dt_cent | dt_dec | dt_year | dt_quarter | dt_month | dt_week | dt_day +-------------------------+--------------------------+--------------------------+--------------------------+--------------------------+--------------------------+--------------------------+------------------------- +2000-01-01T00:00:00.000Z | 2000-01-01T00:00:00.000Z | 2010-01-01T00:00:00.000Z | 2019-01-01T00:00:00.000Z | 2019-07-01T00:00:00.000Z | 2019-09-01T00:00:00.000Z | 2019-09-02T00:00:00.000Z | 2019-09-04T00:00:00.000Z +; + +selectDateTruncWithField +schema::emp_no:i|birth_date:ts|dt_mil:ts|dt_cent:ts|dt_dec:ts|dt_year:ts|dt_quarter:ts|dt_month:ts|dt_week:ts|dt_day:ts +SELECT emp_no, birth_date, DATE_TRUNC('millennium', birth_date) as dt_mil, DATE_TRUNC('centuries', birth_date) as dt_cent, +DATE_TRUNC('decades', birth_date) as dt_dec, DATE_TRUNC('year', birth_date) as dt_year, DATE_TRUNC('quarter', birth_date) as dt_quarter, +DATE_TRUNC('month', birth_date) as dt_month, DATE_TRUNC('week', birth_date) as dt_week, DATE_TRUNC('day', birth_date) as dt_day +FROM test_emp WHERE emp_no >= 10032 AND emp_no <= 10042 ORDER BY 1; + + emp_no | birth_date | dt_mil | dt_cent | dt_dec | dt_year | dt_quarter | dt_month | dt_week | dt_day +--------+-------------------------+--------------------------+--------------------------+--------------------------+--------------------------+--------------------------+--------------------------+--------------------------+------------------------- +10032 |1960-08-09 00:00:00.000Z | 0999-12-27 00:00:00.000Z | 1900-01-01 00:00:00.000Z | 1960-01-01 00:00:00.000Z | 1960-01-01 00:00:00.000Z | 1960-07-01 00:00:00.000Z | 1960-08-01 00:00:00.000Z | 1960-08-08 00:00:00.000Z | 1960-08-09 00:00:00.000Z +10033 |1956-11-14 00:00:00.000Z | 0999-12-27 00:00:00.000Z 
| 1900-01-01 00:00:00.000Z | 1950-01-01 00:00:00.000Z | 1956-01-01 00:00:00.000Z | 1956-10-01 00:00:00.000Z | 1956-11-01 00:00:00.000Z | 1956-11-12 00:00:00.000Z | 1956-11-14 00:00:00.000Z +10034 |1962-12-29 00:00:00.000Z | 0999-12-27 00:00:00.000Z | 1900-01-01 00:00:00.000Z | 1960-01-01 00:00:00.000Z | 1962-01-01 00:00:00.000Z | 1962-10-01 00:00:00.000Z | 1962-12-01 00:00:00.000Z | 1962-12-24 00:00:00.000Z | 1962-12-29 00:00:00.000Z +10035 |1953-02-08 00:00:00.000Z | 0999-12-27 00:00:00.000Z | 1900-01-01 00:00:00.000Z | 1950-01-01 00:00:00.000Z | 1953-01-01 00:00:00.000Z | 1953-01-01 00:00:00.000Z | 1953-02-01 00:00:00.000Z | 1953-02-02 00:00:00.000Z | 1953-02-08 00:00:00.000Z +10036 |1959-08-10 00:00:00.000Z | 0999-12-27 00:00:00.000Z | 1900-01-01 00:00:00.000Z | 1950-01-01 00:00:00.000Z | 1959-01-01 00:00:00.000Z | 1959-07-01 00:00:00.000Z | 1959-08-01 00:00:00.000Z | 1959-08-10 00:00:00.000Z | 1959-08-10 00:00:00.000Z +10037 |1963-07-22 00:00:00.000Z | 0999-12-27 00:00:00.000Z | 1900-01-01 00:00:00.000Z | 1960-01-01 00:00:00.000Z | 1963-01-01 00:00:00.000Z | 1963-07-01 00:00:00.000Z | 1963-07-01 00:00:00.000Z | 1963-07-22 00:00:00.000Z | 1963-07-22 00:00:00.000Z +10038 |1960-07-20 00:00:00.000Z | 0999-12-27 00:00:00.000Z | 1900-01-01 00:00:00.000Z | 1960-01-01 00:00:00.000Z | 1960-01-01 00:00:00.000Z | 1960-07-01 00:00:00.000Z | 1960-07-01 00:00:00.000Z | 1960-07-18 00:00:00.000Z | 1960-07-20 00:00:00.000Z +10039 |1959-10-01 00:00:00.000Z | 0999-12-27 00:00:00.000Z | 1900-01-01 00:00:00.000Z | 1950-01-01 00:00:00.000Z | 1959-01-01 00:00:00.000Z | 1959-10-01 00:00:00.000Z | 1959-10-01 00:00:00.000Z | 1959-09-28 00:00:00.000Z | 1959-10-01 00:00:00.000Z +10040 | null | null | null | null | null | null | null | null | null +10041 | null | null | null | null | null | null | null | null | null +10042 | null | null | null | null | null | null | null | null | null +; + +selectDateTruncWithNullTruncateField +SELECT DATE_TRUNC(null, birth_date) AS dt FROM test_emp LIMIT 5; + + dt:ts +------ +null +null +null +null +null +; + +selectDateTruncWithScalars +SELECT birth_date, DATE_TRUNC(CAST(CHAR(109) AS VARCHAR), birth_date + INTERVAL 12 YEAR) AS dt FROM test_emp ORDER BY 1 DESC NULLS LAST LIMIT 5; + + birth_date:ts | dt:ts +-------------------------+--------------------- +1965-01-03 00:00:00.000Z | 1977-01-01 00:00:00.000Z +1964-10-18 00:00:00.000Z | 1976-10-01 00:00:00.000Z +1964-06-11 00:00:00.000Z | 1976-06-01 00:00:00.000Z +1964-06-02 00:00:00.000Z | 1976-06-01 00:00:00.000Z +1964-04-18 00:00:00.000Z | 1976-04-01 00:00:00.000Z +; + +selectDateTruncWithTruncArgFromField +SELECT DATE_TRUNC(CONCAT(gender, 'illennium'), birth_date) AS dt FROM test_emp WHERE gender='M' ORDER BY 1 DESC LIMIT 2; + + dt:ts +------------------------ +0999-12-27 00:00:00.000Z +0999-12-27 00:00:00.000Z +; + +selectDateTruncWithComplexExpressions +SELECT gender, birth_date, DATE_TRUNC(CASE WHEN gender = 'M' THEN CONCAT(gender, 'onths') WHEN gender = 'F' THEN 'decade' ELSE 'quarter' END, +birth_date + INTERVAL 10 month) AS dt FROM test_emp WHERE dt > '1954-07-01'::date ORDER BY emp_no LIMIT 10; + + gender:s | birth_date:ts | dt:ts +------------+--------------------------+--------------------- +F | 1964-06-02 00:00:00.000Z | 1960-01-01 00:00:00.000Z +M | 1959-12-03 00:00:00.000Z | 1960-10-01 00:00:00.000Z +M | 1954-05-01 00:00:00.000Z | 1955-03-01 00:00:00.000Z +M | 1955-01-21 00:00:00.000Z | 1955-11-01 00:00:00.000Z +M | 1958-02-19 00:00:00.000Z | 1958-12-01 00:00:00.000Z +null | 1963-06-01 00:00:00.000Z | 1964-04-01 
00:00:00.000Z +null | 1960-10-04 00:00:00.000Z | 1961-07-01 00:00:00.000Z +null | 1963-06-07 00:00:00.000Z | 1964-04-01 00:00:00.000Z +null | 1956-02-12 00:00:00.000Z | 1956-10-01 00:00:00.000Z +null | 1959-08-19 00:00:00.000Z | 1960-04-01 00:00:00.000Z +; + +dateTruncOrderBy +schema::emp_no:i|hire_date:ts|dt:ts +SELECT emp_no, hire_date, DATE_TRUNC('quarter', hire_date) as dt FROM test_emp ORDER BY dt NULLS LAST, emp_no LIMIT 5; + + emp_no | hire_date | dt +--------+--------------------------+------------------------- +10009 | 1985-02-18 00:00:00.000Z | 1985-01-01 00:00:00.000Z +10048 | 1985-02-24 00:00:00.000Z | 1985-01-01 00:00:00.000Z +10098 | 1985-05-13 00:00:00.000Z | 1985-04-01 00:00:00.000Z +10061 | 1985-09-17 00:00:00.000Z | 1985-07-01 00:00:00.000Z +10076 | 1985-07-09 00:00:00.000Z | 1985-07-01 00:00:00.000Z +; + +dateTruncFilter +schema::emp_no:i|hire_date:ts|dt:ts +SELECT emp_no, hire_date, DATE_TRUNC('quarter', hire_date) as dt FROM test_emp WHERE DATE_TRUNC('quarter', hire_date) > '1994-07-01T00:00:00.000Z'::timestamp ORDER BY emp_no; + + emp_no | hire_date | dt +--------+--------------------------+------------------------- +10016 | 1995-01-27 00:00:00.000Z | 1995-01-01 00:00:00.000Z +10019 | 1999-04-30 00:00:00.000Z | 1999-04-01 00:00:00.000Z +10022 | 1995-08-22 00:00:00.000Z | 1995-07-01 00:00:00.000Z +10024 | 1997-05-19 00:00:00.000Z | 1997-04-01 00:00:00.000Z +10026 | 1995-03-20 00:00:00.000Z | 1995-01-01 00:00:00.000Z +10054 | 1995-03-13 00:00:00.000Z | 1995-01-01 00:00:00.000Z +10084 | 1995-12-15 00:00:00.000Z | 1995-10-01 00:00:00.000Z +10093 | 1996-11-05 00:00:00.000Z | 1996-10-01 00:00:00.000Z +; + +dateTruncGroupBy +schema::count:l|dt:ts +SELECT count(*) as count, DATE_TRUNC('decade', hire_date) dt FROM test_emp GROUP BY dt ORDER BY 2; + + count | dt +--------+------------------------- +59 | 1980-01-01 00:00:00.000Z +41 | 1990-01-01 00:00:00.000Z +; + +dateTruncHaving +schema::gender:s|dt:ts +SELECT gender, max(hire_date) dt FROM test_emp GROUP BY gender HAVING DATE_TRUNC('year', max(hire_date)) >= '1997-01-01T00:00:00.000Z'::timestamp ORDER BY 1; + + gender | dt +--------+------------------------- +null | 1999-04-30 00:00:00.000Z +F | 1997-05-19 00:00:00.000Z +; + // // Aggregate // @@ -404,4 +549,4 @@ SELECT CAST (CAST (birth_date AS VARCHAR) AS TIMESTAMP) a FROM test_emp WHERE YE a:ts --------------- 1965-01-03T00:00:00Z -; \ No newline at end of file +; diff --git a/x-pack/plugin/sql/qa/src/main/resources/docs/docs.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/docs/docs.csv-spec index 1cd3b92fece..37b4176d711 100644 --- a/x-pack/plugin/sql/qa/src/main/resources/docs/docs.csv-spec +++ b/x-pack/plugin/sql/qa/src/main/resources/docs/docs.csv-spec @@ -235,16 +235,17 @@ CURRENT_DATE |SCALAR CURRENT_TIME |SCALAR CURRENT_TIMESTAMP|SCALAR CURTIME |SCALAR +DATE_TRUNC |SCALAR DAY |SCALAR DAYNAME |SCALAR -DAYOFMONTH |SCALAR -DAYOFWEEK |SCALAR -DAYOFYEAR |SCALAR -DAY_NAME |SCALAR -DAY_OF_MONTH |SCALAR -DAY_OF_WEEK |SCALAR -DAY_OF_YEAR |SCALAR -DOM |SCALAR +DAYOFMONTH |SCALAR +DAYOFWEEK |SCALAR +DAYOFYEAR |SCALAR +DAY_NAME |SCALAR +DAY_OF_MONTH |SCALAR +DAY_OF_WEEK |SCALAR +DAY_OF_YEAR |SCALAR +DOM |SCALAR DOW |SCALAR DOY |SCALAR HOUR |SCALAR @@ -2411,6 +2412,58 @@ SELECT DAY_OF_MONTH(CAST('2018-02-19T10:23:27Z' AS TIMESTAMP)) AS day; // end::dayOfMonth ; +truncateDateTimeMillennium +// tag::truncateDateTimeMillennium +SELECT DATE_TRUNC('millennium', '2019-09-04T11:22:33.123Z'::datetime) AS millennium; + + millennium +------------------------ +2000-01-01T00:00:00.000Z 
+// end::truncateDateTimeMillennium +; + +truncateDateTimeWeek +// tag::truncateDateTimeWeek +SELECT DATE_TRUNC('week', '2019-08-24T11:22:33.123Z'::datetime) AS week; + + week +------------------------ +2019-08-19T00:00:00.000Z +// end::truncateDateTimeWeek +; + +truncateDateTimeMinutes +// tag::truncateDateTimeMinutes +SELECT DATE_TRUNC('mi', '2019-09-04T11:22:33.123Z'::datetime) AS mins; + + mins +------------------------ +2019-09-04T11:22:00.000Z +// end::truncateDateTimeMinutes +; + +truncateDateDecades +schema::decades:ts +// tag::truncateDateDecades +SELECT DATE_TRUNC('decade', CAST('2019-09-04' AS DATE)) AS decades; + + decades +------------------------ +2010-01-01T00:00:00.000Z +// end::truncateDateDecades +; + +truncateDateQuarter +schema::quarter:ts +// tag::truncateDateQuarter +SELECT DATE_TRUNC('quarters', CAST('2019-09-04' AS DATE)) AS quarter; + + quarter +------------------------ +2019-07-01T00:00:00.000Z +// end::truncateDateQuarter +; + constantDayOfWeek // tag::dayOfWeek SELECT DAY_OF_WEEK(CAST('2018-02-19T10:23:27Z' AS TIMESTAMP)) AS day; diff --git a/x-pack/plugin/sql/qa/src/main/resources/setup_mock_metadata_get_columns_empty.sql b/x-pack/plugin/sql/qa/src/main/resources/setup_mock_metadata_get_columns_empty.sql new file mode 100644 index 00000000000..d3bedc429e0 --- /dev/null +++ b/x-pack/plugin/sql/qa/src/main/resources/setup_mock_metadata_get_columns_empty.sql @@ -0,0 +1,25 @@ +CREATE TABLE mock ( + TABLE_SCHEM VARCHAR, + TABLE_NAME VARCHAR, + COLUMN_NAME VARCHAR, + DATA_TYPE INTEGER, + TYPE_NAME VARCHAR, + COLUMN_SIZE INTEGER, + BUFFER_LENGTH INTEGER, + DECIMAL_DIGITS INTEGER, + NUM_PREC_RADIX INTEGER, + NULLABLE INTEGER, + REMARKS VARCHAR, + COLUMN_DEF VARCHAR, + SQL_DATA_TYPE INTEGER, + SQL_DATETIME_SUB INTEGER, + CHAR_OCTET_LENGTH INTEGER, + ORDINAL_POSITION INTEGER, + IS_NULLABLE VARCHAR, + SCOPE_CATALOG VARCHAR, + SCOPE_SCHEMA VARCHAR, + SCOPE_TABLE VARCHAR, + SOURCE_DATA_TYPE SMALLINT, + IS_AUTOINCREMENT VARCHAR, + IS_GENERATEDCOLUMN VARCHAR +); diff --git a/x-pack/plugin/sql/qa/src/main/resources/setup_mock_metadata_get_tables_empty.sql b/x-pack/plugin/sql/qa/src/main/resources/setup_mock_metadata_get_tables_empty.sql new file mode 100644 index 00000000000..c602e97c4d7 --- /dev/null +++ b/x-pack/plugin/sql/qa/src/main/resources/setup_mock_metadata_get_tables_empty.sql @@ -0,0 +1,15 @@ +CREATE TABLE mock ( + TABLE_SCHEM VARCHAR, + TABLE_NAME VARCHAR, + TABLE_TYPE VARCHAR, + REMARKS VARCHAR, + TYPE_CAT VARCHAR, + TYPE_SCHEM VARCHAR, + TYPE_NAME VARCHAR, + SELF_REFERENCING_COL_NAME VARCHAR, + REF_GENERATION VARCHAR +) AS +SELECT null, 'test_empty', 'BASE TABLE', '', null, null, null, null, null FROM DUAL +UNION ALL +SELECT null, 'test_empty_again', 'BASE TABLE', '', null, null, null, null, null FROM DUAL +; diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/StringUtils.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/StringUtils.java index bd90354e5f9..44800fdc6c2 100644 --- a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/StringUtils.java +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/StringUtils.java @@ -21,6 +21,7 @@ import static java.time.format.DateTimeFormatter.ISO_LOCAL_DATE; import static java.time.temporal.ChronoField.HOUR_OF_DAY; import static java.time.temporal.ChronoField.MILLI_OF_SECOND; import static java.time.temporal.ChronoField.MINUTE_OF_HOUR; +import static java.time.temporal.ChronoField.NANO_OF_SECOND; 
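// Illustrative note, not part of the patch: the ISO_DATE_WITH_NANOS formatter defined just below
// prints between 3 and 9 fractional-second digits plus the offset id, so (assuming the formatter
// is in scope) StringUtils.ISO_DATE_WITH_NANOS.format(ZonedDateTime.parse("2019-09-04T11:22:33.123456789Z"))
// renders as "2019-09-04T11:22:33.123456789Z".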
import static java.time.temporal.ChronoField.SECOND_OF_MINUTE; public final class StringUtils { @@ -40,6 +41,19 @@ public final class StringUtils { .appendOffsetId() .toFormatter(Locale.ROOT); + public static final DateTimeFormatter ISO_DATE_WITH_NANOS = new DateTimeFormatterBuilder() + .parseCaseInsensitive() + .append(ISO_LOCAL_DATE) + .appendLiteral('T') + .appendValue(HOUR_OF_DAY, 2) + .appendLiteral(':') + .appendValue(MINUTE_OF_HOUR, 2) + .appendLiteral(':') + .appendValue(SECOND_OF_MINUTE, 2) + .appendFraction(NANO_OF_SECOND, 3, 9, true) + .appendOffsetId() + .toFormatter(Locale.ROOT); + public static final DateTimeFormatter ISO_TIME_WITH_MILLIS = new DateTimeFormatterBuilder() .parseCaseInsensitive() .appendValue(HOUR_OF_DAY, 2) diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Analyzer.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Analyzer.java index 40a34dcf006..901318258c0 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Analyzer.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Analyzer.java @@ -110,6 +110,7 @@ public class Analyzer extends RuleExecutor { new ResolveFunctions(), new ResolveAliases(), new ProjectedAggregations(), + new HavingOverProject(), new ResolveAggsInHaving(), new ResolveAggsInOrderBy() //new ImplicitCasting() @@ -1002,6 +1003,45 @@ public class Analyzer extends RuleExecutor { } } + // + // Detect implicit grouping with filtering and convert them into aggregates. + // SELECT 1 FROM x HAVING COUNT(*) > 0 + // is a filter followed by projection and fails as the engine does not + // understand it is an implicit grouping. + // + private static class HavingOverProject extends AnalyzeRule { + + @Override + protected LogicalPlan rule(Filter f) { + if (f.child() instanceof Project) { + Project p = (Project) f.child(); + + for (Expression n : p.projections()) { + if (n instanceof Alias) { + n = ((Alias) n).child(); + } + // no literal or aggregates - it's a 'regular' projection + if (n.foldable() == false && Functions.isAggregate(n) == false + // folding might not work (it might wait for the optimizer) + // so check whether any column is referenced + && n.anyMatch(e -> e instanceof FieldAttribute) == true) { + return f; + } + } + + if (containsAggregate(f.condition())) { + return new Filter(f.source(), new Aggregate(p.source(), p.child(), emptyList(), p.projections()), f.condition()); + } + } + return f; + } + + @Override + protected boolean skipResolved() { + return false; + } + } + // // Handle aggs in HAVING. To help folding any aggs not found in Aggregation // will be pushed down to the Aggregate and then projected. This also simplifies the Verifier's job. 
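For illustration, a minimal JDBC sketch of the implicit-grouping query shape that the new HavingOverProject rule rewrites into an Aggregate so analysis no longer fails on it; the open java.sql.Connection (called es here) and the test_emp index are assumptions borrowed from the spec tests above, and the snippet is not part of the patch itself.

import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.Statement;

static void implicitGroupingSketch(Connection es) throws Exception {
    // A literal projection with a HAVING over an aggregate: there is no explicit GROUP BY,
    // so the whole table forms one implicit group that the HAVING condition filters.
    try (Statement st = es.createStatement();
         ResultSet rs = st.executeQuery("SELECT 1 AS l FROM test_emp HAVING COUNT(*) > 0")) {
        while (rs.next()) {
            System.out.println(rs.getInt("l")); // a single row with l = 1 when test_emp is non-empty
        }
    }
}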
@@ -1237,14 +1277,13 @@ public class Analyzer extends RuleExecutor { protected LogicalPlan rule(LogicalPlan plan) { if (plan instanceof Project) { Project p = (Project) plan; - return new Project(p.source(), p.child(), cleanExpressions(p.projections())); + return new Project(p.source(), p.child(), cleanSecondaryAliases(p.projections())); } if (plan instanceof Aggregate) { Aggregate a = (Aggregate) plan; // clean group expressions - List cleanedGroups = a.groupings().stream().map(CleanAliases::trimAliases).collect(toList()); - return new Aggregate(a.source(), a.child(), cleanedGroups, cleanExpressions(a.aggregates())); + return new Aggregate(a.source(), a.child(), cleanAllAliases(a.groupings()), cleanSecondaryAliases(a.aggregates())); } return plan.transformExpressionsOnly(e -> { @@ -1255,8 +1294,20 @@ public class Analyzer extends RuleExecutor { }); } - private List cleanExpressions(List args) { - return args.stream().map(CleanAliases::trimNonTopLevelAliases).map(NamedExpression.class::cast).collect(toList()); + private List cleanSecondaryAliases(List args) { + List cleaned = new ArrayList<>(args.size()); + for (NamedExpression ne : args) { + cleaned.add((NamedExpression) trimNonTopLevelAliases(ne)); + } + return cleaned; + } + + private List cleanAllAliases(List args) { + List cleaned = new ArrayList<>(args.size()); + for (Expression e : args) { + cleaned.add(trimAliases(e)); + } + return cleaned; } public static Expression trimNonTopLevelAliases(Expression e) { diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolver.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolver.java index 0b596a1f894..f4acf809c98 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolver.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolver.java @@ -338,12 +338,13 @@ public class IndexResolver { return null; }); - if (indices.size() != 1) { - throw new SqlIllegalArgumentException("Incorrect merging of mappings (likely due to a bug) - expect 1 but found [{}]", + if (indices.size() > 1) { + throw new SqlIllegalArgumentException( + "Incorrect merging of mappings (likely due to a bug) - expect at most one but found [{}]", indices.size()); } - return IndexResolution.valid(indices.get(0)); + return IndexResolution.valid(indices.isEmpty() ? 
new EsIndex(indexNames[0], emptyMap()) : indices.get(0)); } private static EsField createField(String fieldName, Map> globalCaps, diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Literal.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Literal.java index 579fd934b48..b4ccd7eb9ff 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Literal.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/Literal.java @@ -77,7 +77,7 @@ public class Literal extends NamedExpression { @Override public Attribute toAttribute() { - return new LiteralAttribute(source(), name(), null, Nullability.FALSE, id(), false, dataType, this); + return new LiteralAttribute(source(), name(), null, nullable(), id(), false, dataType, this); } @Override diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/LiteralAttribute.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/LiteralAttribute.java index 6463520cf83..1305240b609 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/LiteralAttribute.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/LiteralAttribute.java @@ -41,4 +41,9 @@ public class LiteralAttribute extends TypedAttribute { public Pipe asPipe() { return literal.asPipe(); } + + @Override + public Object fold() { + return literal.fold(); + } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/TypeResolutions.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/TypeResolutions.java index d382dad83a1..c465ab1b2de 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/TypeResolutions.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/TypeResolutions.java @@ -57,7 +57,6 @@ public final class TypeResolutions { "date", "time", "datetime", "numeric"); } - public static TypeResolution isGeo(Expression e, String operationName, ParamOrdinal paramOrd) { return isType(e, DataType::isGeo, operationName, paramOrd, "geo_point", "geo_shape"); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/FunctionRegistry.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/FunctionRegistry.java index 3a9ae062034..0ebe256fe09 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/FunctionRegistry.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/FunctionRegistry.java @@ -31,6 +31,7 @@ import org.elasticsearch.xpack.sql.expression.function.scalar.User; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.CurrentDate; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.CurrentDateTime; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.CurrentTime; +import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTrunc; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DayName; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DayOfMonth; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DayOfWeek; @@ -104,8 +105,8 @@ import org.elasticsearch.xpack.sql.expression.function.scalar.string.UCase; import org.elasticsearch.xpack.sql.expression.predicate.conditional.Case; import 
org.elasticsearch.xpack.sql.expression.predicate.conditional.Coalesce; import org.elasticsearch.xpack.sql.expression.predicate.conditional.Greatest; -import org.elasticsearch.xpack.sql.expression.predicate.conditional.Iif; import org.elasticsearch.xpack.sql.expression.predicate.conditional.IfNull; +import org.elasticsearch.xpack.sql.expression.predicate.conditional.Iif; import org.elasticsearch.xpack.sql.expression.predicate.conditional.Least; import org.elasticsearch.xpack.sql.expression.predicate.conditional.NullIf; import org.elasticsearch.xpack.sql.expression.predicate.operator.arithmetic.Mod; @@ -193,6 +194,7 @@ public class FunctionRegistry { def(DayOfMonth.class, DayOfMonth::new, "DAY_OF_MONTH", "DAYOFMONTH", "DAY", "DOM"), def(DayOfWeek.class, DayOfWeek::new, "DAY_OF_WEEK", "DAYOFWEEK", "DOW"), def(DayOfYear.class, DayOfYear::new, "DAY_OF_YEAR", "DAYOFYEAR", "DOY"), + def(DateTrunc.class, DateTrunc::new, "DATE_TRUNC"), def(HourOfDay.class, HourOfDay::new, "HOUR_OF_DAY", "HOUR"), def(IsoDayOfWeek.class, IsoDayOfWeek::new, "ISO_DAY_OF_WEEK", "ISODAYOFWEEK", "ISODOW", "IDOW"), def(IsoWeekOfYear.class, IsoWeekOfYear::new, "ISO_WEEK_OF_YEAR", "ISOWEEKOFYEAR", "ISOWEEK", "IWOY", "IW"), diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/Processors.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/Processors.java index 0b9bbd1094a..fa1eda8b152 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/Processors.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/Processors.java @@ -8,13 +8,14 @@ package org.elasticsearch.xpack.sql.expression.function.scalar; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.NamedWriteableRegistry.Entry; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeProcessor; +import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTruncProcessor; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.NamedDateTimeProcessor; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.NonIsoDateTimeProcessor; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.QuarterProcessor; +import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.TimeProcessor; import org.elasticsearch.xpack.sql.expression.function.scalar.geo.GeoProcessor; import org.elasticsearch.xpack.sql.expression.function.scalar.geo.StDistanceProcessor; import org.elasticsearch.xpack.sql.expression.function.scalar.geo.StWkttosqlProcessor; -import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.TimeProcessor; import org.elasticsearch.xpack.sql.expression.function.scalar.math.BinaryMathProcessor; import org.elasticsearch.xpack.sql.expression.function.scalar.math.BinaryOptionalMathProcessor; import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor; @@ -88,6 +89,7 @@ public final class Processors { entries.add(new Entry(Processor.class, NamedDateTimeProcessor.NAME, NamedDateTimeProcessor::new)); entries.add(new Entry(Processor.class, NonIsoDateTimeProcessor.NAME, NonIsoDateTimeProcessor::new)); entries.add(new Entry(Processor.class, QuarterProcessor.NAME, QuarterProcessor::new)); + entries.add(new Entry(Processor.class, DateTruncProcessor.NAME, DateTruncProcessor::new)); // math entries.add(new Entry(Processor.class, 
BinaryMathProcessor.NAME, BinaryMathProcessor::new)); entries.add(new Entry(Processor.class, BinaryOptionalMathProcessor.NAME, BinaryOptionalMathProcessor::new)); diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTrunc.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTrunc.java new file mode 100644 index 00000000000..7d403ee3a3a --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTrunc.java @@ -0,0 +1,250 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.datetime; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.Expressions; +import org.elasticsearch.xpack.sql.expression.Nullability; +import org.elasticsearch.xpack.sql.expression.function.scalar.BinaryScalarFunction; +import org.elasticsearch.xpack.sql.expression.gen.pipeline.Pipe; +import org.elasticsearch.xpack.sql.expression.gen.script.ScriptTemplate; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.tree.Source; +import org.elasticsearch.xpack.sql.type.DataType; +import org.elasticsearch.xpack.sql.util.StringUtils; + +import java.time.ZoneId; +import java.time.ZonedDateTime; +import java.time.temporal.ChronoField; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.function.Function; + +import static org.elasticsearch.common.logging.LoggerMessageFormat.format; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isDate; +import static org.elasticsearch.xpack.sql.expression.TypeResolutions.isString; +import static org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTruncProcessor.process; +import static org.elasticsearch.xpack.sql.expression.gen.script.ParamsBuilder.paramsBuilder; + +public class DateTrunc extends BinaryScalarFunction { + + public enum Part { + + MILLENNIUM(dt -> { + int year = dt.getYear(); + int firstYearOfMillenium = year - (year % 1000); + return dt + .with(ChronoField.YEAR, firstYearOfMillenium) + .with(ChronoField.MONTH_OF_YEAR, 1) + .with(ChronoField.DAY_OF_MONTH, 1) + .toLocalDate().atStartOfDay(dt.getZone()); + },"millennia"), + CENTURY(dt -> { + int year = dt.getYear(); + int firstYearOfCentury = year - (year % 100); + return dt + .with(ChronoField.YEAR, firstYearOfCentury) + .with(ChronoField.MONTH_OF_YEAR, 1) + .with(ChronoField.DAY_OF_MONTH, 1) + .toLocalDate().atStartOfDay(dt.getZone()); + }, "centuries"), + DECADE(dt -> { + int year = dt.getYear(); + int firstYearOfDecade = year - (year % 10); + return dt + .with(ChronoField.YEAR, firstYearOfDecade) + .with(ChronoField.MONTH_OF_YEAR, 1) + .with(ChronoField.DAY_OF_MONTH, 1) + .toLocalDate().atStartOfDay(dt.getZone()); + }, "decades"), + YEAR(dt -> dt + .with(ChronoField.MONTH_OF_YEAR, 1) + .with(ChronoField.DAY_OF_MONTH, 1) + .toLocalDate().atStartOfDay(dt.getZone()), + "years", "yy", "yyyy"), + QUARTER(dt -> { + int month = dt.getMonthValue(); + int firstMonthOfQuarter = (((month - 1) / 
3) * 3) + 1; + return dt + .with(ChronoField.MONTH_OF_YEAR, firstMonthOfQuarter) + .with(ChronoField.DAY_OF_MONTH, 1) + .toLocalDate().atStartOfDay(dt.getZone()); + }, "quarters", "qq", "q"), + MONTH(dt -> dt + .with(ChronoField.DAY_OF_MONTH, 1) + .toLocalDate().atStartOfDay(dt.getZone()), + "months", "mm", "m"), + WEEK(dt -> dt + .with(ChronoField.DAY_OF_WEEK, 1) + .toLocalDate().atStartOfDay(dt.getZone()), + "weeks", "wk", "ww"), + DAY(dt -> dt.toLocalDate().atStartOfDay(dt.getZone()), "days", "dd", "d"), + HOUR(dt -> { + int hour = dt.getHour(); + return dt.toLocalDate().atStartOfDay(dt.getZone()) + .with(ChronoField.HOUR_OF_DAY, hour); + }, "hours", "hh"), + MINUTE(dt -> { + int hour = dt.getHour(); + int minute = dt.getMinute(); + return dt.toLocalDate().atStartOfDay(dt.getZone()) + .with(ChronoField.HOUR_OF_DAY, hour) + .with(ChronoField.MINUTE_OF_HOUR, minute); + }, "minutes", "mi", "n"), + SECOND(dt -> dt.with(ChronoField.NANO_OF_SECOND, 0), "seconds", "ss", "s"), + MILLISECOND(dt -> { + int micros = dt.get(ChronoField.MICRO_OF_SECOND); + return dt.with(ChronoField.MILLI_OF_SECOND, (micros / 1000)); + }, "milliseconds", "ms"), + MICROSECOND(dt -> { + int nanos = dt.getNano(); + return dt.with(ChronoField.MICRO_OF_SECOND, (nanos / 1000)); + }, "microseconds", "mcs"), + NANOSECOND(dt -> dt, "nanoseconds", "ns"); + + private static final Map NAME_TO_PART; + + static { + NAME_TO_PART = new HashMap<>(); + + for (Part datePart : Part.values()) { + String lowerCaseName = datePart.name().toLowerCase(Locale.ROOT); + + NAME_TO_PART.put(lowerCaseName, datePart); + for (String alias : datePart.aliases) { + NAME_TO_PART.put(alias, datePart); + } + } + } + + private Set aliases; + private Function truncateFunction; + + Part(Function truncateFunction, String... 
aliases) { + this.truncateFunction = truncateFunction; + this.aliases = Collections.unmodifiableSet(new HashSet<>(Arrays.asList(aliases))); + } + + public static Part resolveTruncate(String truncateTo) { + return NAME_TO_PART.get(truncateTo.toLowerCase(Locale.ROOT)); + } + + public static List findSimilar(String match) { + return StringUtils.findSimilar(match, NAME_TO_PART.keySet()); + } + + public ZonedDateTime truncate(ZonedDateTime dateTime) { + return truncateFunction.apply(dateTime); + } + } + + private final ZoneId zoneId; + + public DateTrunc(Source source, Expression truncateTo, Expression timestamp, ZoneId zoneId) { + super(source, truncateTo, timestamp); + this.zoneId = zoneId; + } + + @Override + public DataType dataType() { + return DataType.DATETIME; + } + + @Override + protected TypeResolution resolveType() { + TypeResolution resolution = isString(left(), sourceText(), Expressions.ParamOrdinal.FIRST); + if (resolution.unresolved()) { + return resolution; + } + + if (left().foldable()) { + String truncateToValue = (String) left().fold(); + if (truncateToValue != null && Part.resolveTruncate(truncateToValue) == null) { + List similar = Part.findSimilar(truncateToValue); + if (similar.isEmpty()) { + return new TypeResolution(format(null, "first argument of [{}] must be one of {} or their aliases, found value [{}]", + sourceText(), + Part.values(), + Expressions.name(left()))); + } else { + return new TypeResolution(format(null, "Unknown value [{}] for first argument of [{}]; did you mean {}?", + Expressions.name(left()), + sourceText(), + similar)); + } + } + } + resolution = isDate(right(), sourceText(), Expressions.ParamOrdinal.SECOND); + if (resolution.unresolved()) { + return resolution; + } + return TypeResolution.TYPE_RESOLVED; + } + + @Override + protected BinaryScalarFunction replaceChildren(Expression newTruncateTo, Expression newTimestamp) { + return new DateTrunc(source(), newTruncateTo, newTimestamp, zoneId); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, DateTrunc::new, left(), right(), zoneId); + } + + @Override + protected Pipe makePipe() { + return new DateTruncPipe(source(), this, Expressions.pipe(left()), Expressions.pipe(right()), zoneId); + } + + @Override + public Nullability nullable() { + return Nullability.TRUE; + } + + @Override + public Object fold() { + return process(left().fold(), right().fold(), zoneId); + } + + @Override + protected ScriptTemplate asScriptFrom(ScriptTemplate leftScript, ScriptTemplate rightScript) { + return new ScriptTemplate( + formatTemplate("{sql}.dateTrunc(" + leftScript.template() + "," + rightScript.template()+ ",{})"), + paramsBuilder() + .script(leftScript.params()) + .script(rightScript.params()) + .variable(zoneId.getId()) + .build(), + dataType()); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), zoneId); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + if (!super.equals(o)) { + return false; + } + DateTrunc dateTrunc = (DateTrunc) o; + return Objects.equals(zoneId, dateTrunc.zoneId); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTruncPipe.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTruncPipe.java new file mode 100644 index 00000000000..a456883f788 --- /dev/null +++ 
b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTruncPipe.java @@ -0,0 +1,64 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.expression.function.scalar.datetime; + +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.gen.pipeline.BinaryPipe; +import org.elasticsearch.xpack.sql.expression.gen.pipeline.Pipe; +import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.tree.Source; + +import java.time.ZoneId; +import java.util.Objects; + +public class DateTruncPipe extends BinaryPipe { + + private final ZoneId zoneId; + + public DateTruncPipe(Source source, Expression expression, Pipe left, Pipe right, ZoneId zoneId) { + super(source, expression, left, right); + this.zoneId = zoneId; + } + + ZoneId zoneId() { + return zoneId; + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, DateTruncPipe::new, expression(), left(), right(), zoneId); + } + + @Override + protected BinaryPipe replaceChildren(Pipe left, Pipe right) { + return new DateTruncPipe(source(), expression(), left, right, zoneId); + } + + @Override + public DateTruncProcessor asProcessor() { + return new DateTruncProcessor(left().asProcessor(), right().asProcessor(), zoneId); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + if (!super.equals(o)) { + return false; + } + DateTruncPipe that = (DateTruncPipe) o; + return zoneId.equals(that.zoneId); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), zoneId); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTruncProcessor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTruncProcessor.java new file mode 100644 index 00000000000..446ede5ba14 --- /dev/null +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTruncProcessor.java @@ -0,0 +1,106 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.sql.expression.function.scalar.datetime; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.common.io.SqlStreamInput; +import org.elasticsearch.xpack.sql.expression.gen.processor.BinaryProcessor; +import org.elasticsearch.xpack.sql.expression.gen.processor.Processor; + +import java.io.IOException; +import java.time.ZoneId; +import java.time.ZonedDateTime; +import java.util.List; +import java.util.Objects; + +import static org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTrunc.Part; + +public class DateTruncProcessor extends BinaryProcessor { + + public static final String NAME = "dtrunc"; + + private final ZoneId zoneId; + + public DateTruncProcessor(Processor source1, Processor source2, ZoneId zoneId) { + super(source1, source2); + this.zoneId = zoneId; + } + + public DateTruncProcessor(StreamInput in) throws IOException { + super(in); + zoneId = SqlStreamInput.asSqlStream(in).zoneId(); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + protected void doWrite(StreamOutput out) { + } + + ZoneId zoneId() { + return zoneId; + } + + @Override + protected Object doProcess(Object left, Object right) { + return process(left, right, zoneId); + } + + /** + * Used in Painless scripting + */ + public static Object process(Object source1, Object source2, String zoneId) { + return process(source1, source2, ZoneId.of(zoneId)); + } + + static Object process(Object source1, Object source2, ZoneId zoneId) { + if (source1 == null || source2 == null) { + return null; + } + if (!(source1 instanceof String)) { + throw new SqlIllegalArgumentException("A string is required; received [{}]", source1); + } + Part truncateDateField = Part.resolveTruncate((String) source1); + if (truncateDateField == null) { + List similar = Part.findSimilar((String) source1); + if (similar.isEmpty()) { + throw new SqlIllegalArgumentException("A value of {} or their aliases is required; received [{}]", + Part.values(), source1); + } else { + throw new SqlIllegalArgumentException("Received value [{}] is not valid date part for truncation; " + "" + + "did you mean {}?", source1, similar); + } + } + + if (!(source2 instanceof ZonedDateTime)) { + throw new SqlIllegalArgumentException("A datetime/date is required; received [{}]", source2); + } + + return truncateDateField.truncate(((ZonedDateTime) source2).withZoneSameInstant(zoneId)); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + DateTruncProcessor that = (DateTruncProcessor) o; + return zoneId.equals(that.zoneId); + } + + @Override + public int hashCode() { + return Objects.hash(zoneId); + } +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/whitelist/InternalSqlScriptUtils.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/whitelist/InternalSqlScriptUtils.java index d39aec44236..73ceb26a7ea 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/whitelist/InternalSqlScriptUtils.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/whitelist/InternalSqlScriptUtils.java @@ -10,14 +10,15 @@ import 
org.elasticsearch.index.fielddata.ScriptDocValues; import org.elasticsearch.script.JodaCompatibleZonedDateTime; import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeFunction; +import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTruncProcessor; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.NamedDateTimeProcessor.NameExtractor; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.NonIsoDateTimeProcessor.NonIsoDateTimeExtractor; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.QuarterProcessor; +import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.TimeFunction; import org.elasticsearch.xpack.sql.expression.function.scalar.geo.GeoProcessor; import org.elasticsearch.xpack.sql.expression.function.scalar.geo.GeoShape; import org.elasticsearch.xpack.sql.expression.function.scalar.geo.StDistanceProcessor; import org.elasticsearch.xpack.sql.expression.function.scalar.geo.StWkttosqlProcessor; -import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.TimeFunction; import org.elasticsearch.xpack.sql.expression.function.scalar.math.BinaryMathProcessor.BinaryMathOperation; import org.elasticsearch.xpack.sql.expression.function.scalar.math.BinaryOptionalMathProcessor.BinaryOptionalMathOperation; import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor.MathOperation; @@ -369,6 +370,10 @@ public final class InternalSqlScriptUtils { return NonIsoDateTimeExtractor.WEEK_OF_YEAR.extract(asDateTime(dateTime), tzId); } + public static ZonedDateTime dateTrunc(String truncateTo, Object dateTime, String tzId) { + return (ZonedDateTime) DateTruncProcessor.process(truncateTo, asDateTime(dateTime) ,tzId); + } + public static ZonedDateTime asDateTime(Object dateTime) { return (ZonedDateTime) asDateTime(dateTime, false); } @@ -400,7 +405,7 @@ public final class InternalSqlScriptUtils { if (text == null || typeName == null) { return null; } - return new IntervalDayTime(Duration.parse(text), DataType.fromTypeName(typeName)); + return new IntervalDayTime(Duration.parse(text), DataType.fromSqlOrEsType(typeName)); } public static IntervalYearMonth intervalYearMonth(String text, String typeName) { @@ -408,7 +413,7 @@ public final class InternalSqlScriptUtils { return null; } - return new IntervalYearMonth(Period.parse(text), DataType.fromTypeName(typeName)); + return new IntervalYearMonth(Period.parse(text), DataType.fromSqlOrEsType(typeName)); } public static OffsetTime asTime(String time) { @@ -548,6 +553,6 @@ public final class InternalSqlScriptUtils { public static Object cast(Object value, String typeName) { // we call asDateTime here to make sure we handle JodaCompatibleZonedDateTime properly, // since casting works for ZonedDateTime objects only - return DataTypeConversion.convert(asDateTime(value, true), DataType.fromTypeName(typeName)); + return DataTypeConversion.convert(asDateTime(value, true), DataType.fromSqlOrEsType(typeName)); } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/optimizer/Optimizer.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/optimizer/Optimizer.java index 72371ab9617..6689a33b162 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/optimizer/Optimizer.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/optimizer/Optimizer.java @@ -1172,7 +1172,9 @@ public class Optimizer extends 
RuleExecutor { return Literal.of(in, null); } - } else if (e.nullable() == Nullability.TRUE && Expressions.anyMatch(e.children(), Expressions::isNull)) { + } else if (e instanceof Alias == false + && e.nullable() == Nullability.TRUE + && Expressions.anyMatch(e.children(), Expressions::isNull)) { return Literal.of(e, null); } @@ -1188,11 +1190,6 @@ public class Optimizer extends RuleExecutor { @Override protected Expression rule(Expression e) { - if (e instanceof Alias) { - Alias a = (Alias) e; - return a.child().foldable() ? Literal.of(a.name(), a.child()) : a; - } - return e.foldable() ? Literal.of(e) : e; } } @@ -1968,7 +1965,16 @@ public class Optimizer extends RuleExecutor { private List extractConstants(List named) { List values = new ArrayList<>(); for (NamedExpression n : named) { - if (n.foldable()) { + if (n instanceof Alias) { + Alias a = (Alias) n; + if (a.child().foldable()) { + values.add(a.child().fold()); + } + // not everything is foldable, bail out early + else { + return values; + } + } else if (n.foldable()) { values.add(n.fold()); } else { // not everything is foldable, bail-out early diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryFolder.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryFolder.java index 802d6d37b7c..ae875d6fc6e 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryFolder.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryFolder.java @@ -135,7 +135,6 @@ class QueryFolder extends RuleExecutor { // for named expressions nothing is recorded as these are resolved last // otherwise 'intermediate' projects might pollute the // output - if (pj instanceof ScalarFunction) { ScalarFunction f = (ScalarFunction) pj; processors.put(f.toAttribute(), Expressions.pipe(f)); @@ -348,6 +347,9 @@ class QueryFolder extends RuleExecutor { queryC = queryC.addColumn(new GroupByRef(matchingGroup.id(), null, child.dataType().isDateBased()), ((GroupingFunction) child).toAttribute()); } + else if (child.foldable()) { + queryC = queryC.addColumn(ne.toAttribute()); + } // fallback to regular agg functions else { // the only thing left is agg function @@ -369,6 +371,9 @@ class QueryFolder extends RuleExecutor { queryC = queryC.addColumn( new GroupByRef(matchingGroup.id(), null, ne.dataType().isDateBased()), ne.toAttribute()); } + else if (ne.foldable()) { + queryC = queryC.addColumn(ne.toAttribute()); + } } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/QueryContainer.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/QueryContainer.java index a0a8741bdc7..5ff560f4baa 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/QueryContainer.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/container/QueryContainer.java @@ -178,6 +178,7 @@ public class QueryContainer { Attribute alias = aliases.get(column); // find the column index int index = -1; + ExpressionId id = column instanceof AggregateFunctionAttribute ? ((AggregateFunctionAttribute) column).innerId() : column.id(); ExpressionId aliasId = alias != null ? (alias instanceof AggregateFunctionAttribute ? 
((AggregateFunctionAttribute) alias) .innerId() : alias.id()) : null; @@ -188,6 +189,7 @@ public class QueryContainer { break; } } + if (index > -1) { mask.set(index); } else { @@ -227,7 +229,7 @@ public class QueryContainer { public boolean isAggsOnly() { if (aggsOnly == null) { - aggsOnly = Boolean.valueOf(this.fields.stream().allMatch(t -> t.v1().supportedByAggsOnlyQuery())); + aggsOnly = Boolean.valueOf(this.fields.stream().anyMatch(t -> t.v1().supportedByAggsOnlyQuery())); } return aggsOnly.booleanValue(); diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypeConversion.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypeConversion.java index 5fd1867aeb2..ebae0a516a4 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypeConversion.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypeConversion.java @@ -23,6 +23,7 @@ import static org.elasticsearch.xpack.sql.type.DataType.DATE; import static org.elasticsearch.xpack.sql.type.DataType.DATETIME; import static org.elasticsearch.xpack.sql.type.DataType.LONG; import static org.elasticsearch.xpack.sql.type.DataType.NULL; +import static org.elasticsearch.xpack.sql.type.DataType.TEXT; import static org.elasticsearch.xpack.sql.type.DataType.TIME; /** @@ -50,6 +51,12 @@ public abstract class DataTypeConversion { if (DataTypes.isNull(right)) { return left; } + if (left.isString() && right.isString()) { + if (left == TEXT) { + return TEXT; + } + return right; + } if (left.isNumeric() && right.isNumeric()) { // if one is int if (left.isInteger()) { diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/util/DateUtils.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/util/DateUtils.java index 0f8afdd1552..ceda288704c 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/util/DateUtils.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/util/DateUtils.java @@ -148,4 +148,6 @@ public final class DateUtils { nano = nano - nano % (int) Math.pow(10, (9 - precision)); return nano; } + + } diff --git a/x-pack/plugin/sql/src/main/resources/org/elasticsearch/xpack/sql/plugin/sql_whitelist.txt b/x-pack/plugin/sql/src/main/resources/org/elasticsearch/xpack/sql/plugin/sql_whitelist.txt index 6d24ea79f2b..b326aefea09 100644 --- a/x-pack/plugin/sql/src/main/resources/org/elasticsearch/xpack/sql/plugin/sql_whitelist.txt +++ b/x-pack/plugin/sql/src/main/resources/org/elasticsearch/xpack/sql/plugin/sql_whitelist.txt @@ -115,6 +115,7 @@ class org.elasticsearch.xpack.sql.expression.function.scalar.whitelist.InternalS String monthName(Object, String) Integer quarter(Object, String) Integer weekOfYear(Object, String) + ZonedDateTime dateTrunc(String, Object, String) IntervalDayTime intervalDayTime(String, String) IntervalYearMonth intervalYearMonth(String, String) ZonedDateTime asDateTime(Object) diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java index f10b1a40270..8844301006f 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java @@ -203,6 +203,32 @@ public class VerifierErrorMessagesTests extends ESTestCase { 
assertEquals("1:8: Invalid datetime field [ABS]. Use any datetime function.", error("SELECT EXTRACT(ABS FROM date) FROM test")); } + public void testDateTruncInvalidArgs() { + assertEquals("1:8: first argument of [DATE_TRUNC(int, date)] must be [string], found value [int] type [integer]", + error("SELECT DATE_TRUNC(int, date) FROM test")); + assertEquals("1:8: second argument of [DATE_TRUNC(keyword, keyword)] must be [date or datetime], found value [keyword] " + + "type [keyword]", error("SELECT DATE_TRUNC(keyword, keyword) FROM test")); + assertEquals("1:8: first argument of [DATE_TRUNC('invalid', keyword)] must be one of [MILLENNIUM, CENTURY, DECADE, " + "" + + "YEAR, QUARTER, MONTH, WEEK, DAY, HOUR, MINUTE, SECOND, MILLISECOND, MICROSECOND, NANOSECOND] " + + "or their aliases, found value ['invalid']", + error("SELECT DATE_TRUNC('invalid', keyword) FROM test")); + assertEquals("1:8: Unknown value ['millenioum'] for first argument of [DATE_TRUNC('millenioum', keyword)]; " + + "did you mean [millennium, millennia]?", + error("SELECT DATE_TRUNC('millenioum', keyword) FROM test")); + assertEquals("1:8: Unknown value ['yyyz'] for first argument of [DATE_TRUNC('yyyz', keyword)]; " + + "did you mean [yyyy, yy]?", + error("SELECT DATE_TRUNC('yyyz', keyword) FROM test")); + } + + public void testDateTruncValidArgs() { + accept("SELECT DATE_TRUNC('decade', date) FROM test"); + accept("SELECT DATE_TRUNC('decades', date) FROM test"); + accept("SELECT DATE_TRUNC('day', date) FROM test"); + accept("SELECT DATE_TRUNC('days', date) FROM test"); + accept("SELECT DATE_TRUNC('dd', date) FROM test"); + accept("SELECT DATE_TRUNC('d', date) FROM test"); + } + public void testValidDateTimeFunctionsOnTime() { accept("SELECT HOUR_OF_DAY(CAST(date AS TIME)) FROM test"); accept("SELECT MINUTE_OF_HOUR(CAST(date AS TIME)) FROM test"); diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolverTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolverTests.java index fbd004a71df..d57c090817d 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolverTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/index/IndexResolverTests.java @@ -20,6 +20,7 @@ import java.util.Map; import java.util.Map.Entry; import java.util.stream.Stream; +import static java.util.Collections.singletonMap; import static org.elasticsearch.common.logging.LoggerMessageFormat.format; public class IndexResolverTests extends ESTestCase { @@ -211,6 +212,12 @@ public class IndexResolverTests extends ESTestCase { } } + public void testIndexWithNoMapping() { + Map> versionFC = singletonMap("_version", + singletonMap("_index", new FieldCapabilities("_version", "_version", false, false))); + assertTrue(IndexResolver.mergedMappings("*", new String[] { "empty" }, versionFC).isValid()); + } + public static IndexResolution merge(EsIndex... 
indices) { return IndexResolver.mergedMappings("*", Stream.of(indices).map(EsIndex::name).toArray(String[]::new), fromMappings(indices)); } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/FunctionTestUtils.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/FunctionTestUtils.java index 3cc1b6d987d..6ddae840094 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/FunctionTestUtils.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/FunctionTestUtils.java @@ -9,6 +9,8 @@ package org.elasticsearch.xpack.sql.expression.function.scalar; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.sql.expression.Literal; +import java.time.Instant; +import java.time.ZonedDateTime; import java.util.BitSet; import java.util.Iterator; @@ -27,7 +29,11 @@ public final class FunctionTestUtils { public static Literal randomIntLiteral() { return l(ESTestCase.randomInt()); } - + + public static Literal randomDatetimeLiteral() { + return l(ZonedDateTime.ofInstant(Instant.ofEpochMilli(ESTestCase.randomLong()), ESTestCase.randomZone())); + } + public static class Combinations implements Iterable { private int n; private int k; @@ -41,6 +47,7 @@ public final class FunctionTestUtils { public Iterator iterator() { return new Iterator() { BitSet bs = new BitSet(n); + { bs.set(0, k); } @@ -55,9 +62,9 @@ public final class FunctionTestUtils { BitSet old = (BitSet) bs.clone(); int b = bs.previousClearBit(n - 1); int b1 = bs.previousSetBit(b); - if (b1 == -1) + if (b1 == -1) { bs = null; - else { + } else { bs.clear(b1); bs.set(b1 + 1, b1 + (n - b) + 1); bs.clear(b1 + (n - b) + 1, n); diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeTestUtils.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeTestUtils.java index 13215eb41ae..45bb3752123 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeTestUtils.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeTestUtils.java @@ -20,6 +20,10 @@ public class DateTimeTestUtils { return ZonedDateTime.of(year, month, day, hour, minute, 0, 0, DateUtils.UTC); } + public static ZonedDateTime dateTime(int year, int month, int day, int hour, int minute, int seconds, int nanos) { + return ZonedDateTime.of(year, month, day, hour, minute, seconds, nanos, DateUtils.UTC); + } + public static ZonedDateTime dateTime(long millisSinceEpoch) { return DateUtils.asDateTime(millisSinceEpoch); } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTruncPipeTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTruncPipeTests.java new file mode 100644 index 00000000000..3c85aa5257e --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTruncPipeTests.java @@ -0,0 +1,133 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.sql.expression.function.scalar.datetime; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.function.scalar.FunctionTestUtils; +import org.elasticsearch.xpack.sql.expression.gen.pipeline.BinaryPipe; +import org.elasticsearch.xpack.sql.expression.gen.pipeline.Pipe; +import org.elasticsearch.xpack.sql.tree.AbstractNodeTestCase; +import org.elasticsearch.xpack.sql.tree.Source; +import org.elasticsearch.xpack.sql.tree.SourceTests; + +import java.time.ZoneId; +import java.util.ArrayList; +import java.util.List; +import java.util.Objects; +import java.util.function.Function; + +import static org.elasticsearch.xpack.sql.expression.Expressions.pipe; +import static org.elasticsearch.xpack.sql.expression.function.scalar.FunctionTestUtils.randomStringLiteral; +import static org.elasticsearch.xpack.sql.tree.SourceTests.randomSource; + +public class DateTruncPipeTests extends AbstractNodeTestCase { + + @Override + protected DateTruncPipe randomInstance() { + return randomDateTruncPipe(); + } + + private Expression randomDateTruncPipeExpression() { + return randomDateTruncPipe().expression(); + } + + public static DateTruncPipe randomDateTruncPipe() { + return (DateTruncPipe) new DateTrunc( + randomSource(), + randomStringLiteral(), + randomStringLiteral(), + randomZone()) + .makePipe(); + } + + @Override + public void testTransform() { + // test transforming only the properties (source, expression), + // skipping the children (the two parameters of the binary function) which are tested separately + DateTruncPipe b1 = randomInstance(); + + Expression newExpression = randomValueOtherThan(b1.expression(), this::randomDateTruncPipeExpression); + DateTruncPipe newB = new DateTruncPipe( + b1.source(), + newExpression, + b1.left(), + b1.right(), + b1.zoneId()); + assertEquals(newB, b1.transformPropertiesOnly(v -> Objects.equals(v, b1.expression()) ? newExpression : v, Expression.class)); + + DateTruncPipe b2 = randomInstance(); + Source newLoc = randomValueOtherThan(b2.source(), SourceTests::randomSource); + newB = new DateTruncPipe( + newLoc, + b2.expression(), + b2.left(), + b2.right(), + b2.zoneId()); + assertEquals(newB, + b2.transformPropertiesOnly(v -> Objects.equals(v, b2.source()) ? 
newLoc : v, Source.class)); + } + + @Override + public void testReplaceChildren() { + DateTruncPipe b = randomInstance(); + Pipe newLeft = pipe(((Expression) randomValueOtherThan(b.left(), FunctionTestUtils::randomStringLiteral))); + Pipe newRight = pipe(((Expression) randomValueOtherThan(b.right(), FunctionTestUtils::randomDatetimeLiteral))); + ZoneId newZoneId = randomValueOtherThan(b.zoneId(), ESTestCase::randomZone); + DateTruncPipe newB = + new DateTruncPipe(b.source(), b.expression(), b.left(), b.right(), newZoneId); + BinaryPipe transformed = newB.replaceChildren(newLeft, b.right()); + + assertEquals(transformed.left(), newLeft); + assertEquals(transformed.source(), b.source()); + assertEquals(transformed.expression(), b.expression()); + assertEquals(transformed.right(), b.right()); + + transformed = newB.replaceChildren(b.left(), newRight); + assertEquals(transformed.left(), b.left()); + assertEquals(transformed.source(), b.source()); + assertEquals(transformed.expression(), b.expression()); + assertEquals(transformed.right(), newRight); + + transformed = newB.replaceChildren(newLeft, newRight); + assertEquals(transformed.left(), newLeft); + assertEquals(transformed.source(), b.source()); + assertEquals(transformed.expression(), b.expression()); + assertEquals(transformed.right(), newRight); + } + + @Override + protected DateTruncPipe mutate(DateTruncPipe instance) { + List<Function<DateTruncPipe, DateTruncPipe>> randoms = new ArrayList<>(); + randoms.add(f -> new DateTruncPipe(f.source(), + f.expression(), + pipe(((Expression) randomValueOtherThan(f.left(), FunctionTestUtils::randomStringLiteral))), + f.right(), + randomValueOtherThan(f.zoneId(), ESTestCase::randomZone))); + randoms.add(f -> new DateTruncPipe(f.source(), + f.expression(), + f.left(), + pipe(((Expression) randomValueOtherThan(f.right(), FunctionTestUtils::randomDatetimeLiteral))), + randomValueOtherThan(f.zoneId(), ESTestCase::randomZone))); + randoms.add(f -> new DateTruncPipe(f.source(), + f.expression(), + pipe(((Expression) randomValueOtherThan(f.left(), FunctionTestUtils::randomStringLiteral))), + pipe(((Expression) randomValueOtherThan(f.right(), FunctionTestUtils::randomDatetimeLiteral))), + randomValueOtherThan(f.zoneId(), ESTestCase::randomZone))); + + return randomFrom(randoms).apply(instance); + } + + @Override + protected DateTruncPipe copy(DateTruncPipe instance) { + return new DateTruncPipe(instance.source(), + instance.expression(), + instance.left(), + instance.right(), + instance.zoneId()); + } +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTruncProcessorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTruncProcessorTests.java new file mode 100644 index 00000000000..47ce7477ddc --- /dev/null +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTruncProcessorTests.java @@ -0,0 +1,160 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
+ */ + +package org.elasticsearch.xpack.sql.expression.function.scalar.datetime; + +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.AbstractSqlWireSerializingTestCase; +import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; +import org.elasticsearch.xpack.sql.expression.Literal; +import org.elasticsearch.xpack.sql.expression.gen.processor.ConstantProcessor; +import org.elasticsearch.xpack.sql.tree.Source; +import org.elasticsearch.xpack.sql.util.DateUtils; + +import java.time.ZoneId; +import java.time.ZonedDateTime; + +import static org.elasticsearch.xpack.sql.expression.Literal.NULL; +import static org.elasticsearch.xpack.sql.expression.function.scalar.FunctionTestUtils.l; +import static org.elasticsearch.xpack.sql.expression.function.scalar.FunctionTestUtils.randomDatetimeLiteral; +import static org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeTestUtils.dateTime; +import static org.elasticsearch.xpack.sql.proto.StringUtils.ISO_DATE_WITH_NANOS; + +public class DateTruncProcessorTests extends AbstractSqlWireSerializingTestCase { + + public static DateTruncProcessor randomDateTruncProcessor() { + return new DateTruncProcessor( + new ConstantProcessor(randomRealisticUnicodeOfLengthBetween(0, 128)), + new ConstantProcessor(ZonedDateTime.now()), + randomZone()); + } + + @Override + protected DateTruncProcessor createTestInstance() { + return randomDateTruncProcessor(); + } + + @Override + protected Reader instanceReader() { + return DateTruncProcessor::new; + } + + @Override + protected ZoneId instanceZoneId(DateTruncProcessor instance) { + return instance.zoneId(); + } + + @Override + protected DateTruncProcessor mutateInstance(DateTruncProcessor instance) { + return new DateTruncProcessor( + new ConstantProcessor(ESTestCase.randomRealisticUnicodeOfLength(128)), + new ConstantProcessor(ZonedDateTime.now()), + randomValueOtherThan(instance.zoneId(), ESTestCase::randomZone)); + } + + public void testInvalidInputs() { + SqlIllegalArgumentException siae = expectThrows(SqlIllegalArgumentException.class, + () -> new DateTrunc(Source.EMPTY, l(5), randomDatetimeLiteral(), randomZone()).makePipe().asProcessor().process(null)); + assertEquals("A string is required; received [5]", siae.getMessage()); + + siae = expectThrows(SqlIllegalArgumentException.class, + () -> new DateTrunc(Source.EMPTY, l("days"), l("foo"), randomZone()).makePipe().asProcessor().process(null)); + assertEquals("A datetime/date is required; received [foo]", siae.getMessage()); + + siae = expectThrows(SqlIllegalArgumentException.class, + () -> new DateTrunc(Source.EMPTY, l("invalid"), randomDatetimeLiteral(), randomZone()).makePipe().asProcessor().process(null)); + assertEquals("A value of [MILLENNIUM, CENTURY, DECADE, YEAR, QUARTER, MONTH, WEEK, DAY, HOUR, MINUTE, " + + "SECOND, MILLISECOND, MICROSECOND, NANOSECOND] or their aliases is required; received [invalid]", + siae.getMessage()); + + siae = expectThrows(SqlIllegalArgumentException.class, + () -> new DateTrunc(Source.EMPTY, l("dacede"), randomDatetimeLiteral(), randomZone()).makePipe().asProcessor().process(null)); + assertEquals("Received value [dacede] is not valid date part for truncation; did you mean [decade, decades]?", + siae.getMessage()); + } + + public void testWithNulls() { + assertNull(new DateTrunc(Source.EMPTY, NULL, randomDatetimeLiteral(), randomZone()).makePipe().asProcessor().process(null)); + assertNull(new DateTrunc(Source.EMPTY, 
l("days"), NULL, randomZone()).makePipe().asProcessor().process(null)); + assertNull(new DateTrunc(Source.EMPTY, NULL, NULL, randomZone()).makePipe().asProcessor().process(null)); + } + + public void testTruncation() { + ZoneId zoneId = ZoneId.of("Etc/GMT-10"); + Literal dateTime = l(dateTime(2019, 9, 3, 18, 10, 37, 123456789)); + + assertEquals("2000-01-01T00:00:00.000+10:00", + DateUtils.toString((ZonedDateTime) new DateTrunc(Source.EMPTY, l("millennia"), dateTime, zoneId) + .makePipe().asProcessor().process(null))); + assertEquals("2000-01-01T00:00:00.000+10:00", + DateUtils.toString((ZonedDateTime) new DateTrunc(Source.EMPTY, l("CENTURY"), dateTime, zoneId) + .makePipe().asProcessor().process(null))); + assertEquals("2010-01-01T00:00:00.000+10:00", + DateUtils.toString((ZonedDateTime) new DateTrunc(Source.EMPTY, l("decades"), dateTime, zoneId) + .makePipe().asProcessor().process(null))); + assertEquals("2019-01-01T00:00:00.000+10:00", + DateUtils.toString((ZonedDateTime) new DateTrunc(Source.EMPTY, l("years"), dateTime, zoneId) + .makePipe().asProcessor().process(null))); + assertEquals("2019-07-01T00:00:00.000+10:00", + toString((ZonedDateTime) new DateTrunc(Source.EMPTY, l("quarters"), dateTime, zoneId) + .makePipe().asProcessor().process(null))); + assertEquals("2019-09-01T00:00:00.000+10:00", + toString((ZonedDateTime) new DateTrunc(Source.EMPTY, l("month"), dateTime, zoneId) + .makePipe().asProcessor().process(null))); + assertEquals("2019-09-02T00:00:00.000+10:00", + toString((ZonedDateTime) new DateTrunc(Source.EMPTY, l("weeks"), dateTime, zoneId) + .makePipe().asProcessor().process(null))); + assertEquals("2019-09-04T00:00:00.000+10:00", + toString((ZonedDateTime) new DateTrunc(Source.EMPTY, l("days"), dateTime, zoneId) + .makePipe().asProcessor().process(null))); + assertEquals("2019-09-04T04:00:00.000+10:00", + toString((ZonedDateTime) new DateTrunc(Source.EMPTY, l("hh"), dateTime, zoneId) + .makePipe().asProcessor().process(null))); + assertEquals("2019-09-04T04:10:00.000+10:00", + toString((ZonedDateTime) new DateTrunc(Source.EMPTY, l("mi"), dateTime, zoneId) + .makePipe().asProcessor().process(null))); + assertEquals("2019-09-04T04:10:37.000+10:00", + toString((ZonedDateTime) new DateTrunc(Source.EMPTY, l("second"), dateTime, zoneId) + .makePipe().asProcessor().process(null))); + assertEquals("2019-09-04T04:10:37.123+10:00", + toString((ZonedDateTime) new DateTrunc(Source.EMPTY, l("ms"), dateTime, zoneId) + .makePipe().asProcessor().process(null))); + assertEquals("2019-09-04T04:10:37.123456+10:00", + toString((ZonedDateTime) new DateTrunc(Source.EMPTY, l("mcs"), dateTime, zoneId) + .makePipe().asProcessor().process(null))); + assertEquals("2019-09-04T04:10:37.123456789+10:00", + toString((ZonedDateTime) new DateTrunc(Source.EMPTY, l("nanoseconds"), dateTime, zoneId) + .makePipe().asProcessor().process(null))); + } + + public void testTruncationEdgeCases() { + ZoneId zoneId = ZoneId.of("Etc/GMT-10"); + Literal dateTime = l(dateTime(-11412, 9, 3, 18, 10, 37, 123456789)); + assertEquals("-11000-01-01T00:00:00.000+10:00", + DateUtils.toString((ZonedDateTime) new DateTrunc(Source.EMPTY, l("millennia"), dateTime, zoneId) + .makePipe().asProcessor().process(null))); + + dateTime = l(dateTime(-12999, 9, 3, 18, 10, 37, 123456789)); + assertEquals("-12900-01-01T00:00:00.000+10:00", + DateUtils.toString((ZonedDateTime) new DateTrunc(Source.EMPTY, l("centuries"), dateTime, zoneId) + .makePipe().asProcessor().process(null))); + + dateTime = l(dateTime(-32999, 9, 3, 18, 10, 37, 
123456789)); + assertEquals("-32990-01-01T00:00:00.000+10:00", + DateUtils.toString((ZonedDateTime) new DateTrunc(Source.EMPTY, l("decades"), dateTime, zoneId) + .makePipe().asProcessor().process(null))); + + dateTime = l(dateTime(-1234, 9, 3, 18, 10, 37, 123456789)); + assertEquals("-1234-08-29T00:00:00.000+10:00", + DateUtils.toString((ZonedDateTime) new DateTrunc(Source.EMPTY, l("week"), dateTime, zoneId) + .makePipe().asProcessor().process(null))); + } + + private String toString(ZonedDateTime dateTime) { + return ISO_DATE_WITH_NANOS.format(dateTime); + } +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/optimizer/OptimizerTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/optimizer/OptimizerTests.java index 1607c4db524..2d3b6cdee52 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/optimizer/OptimizerTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/optimizer/OptimizerTests.java @@ -9,6 +9,7 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.sql.analysis.analyzer.Analyzer.PruneSubqueryAliases; import org.elasticsearch.xpack.sql.expression.Alias; import org.elasticsearch.xpack.sql.expression.Expression; +import org.elasticsearch.xpack.sql.expression.Expression.TypeResolution; import org.elasticsearch.xpack.sql.expression.Expressions; import org.elasticsearch.xpack.sql.expression.FieldAttribute; import org.elasticsearch.xpack.sql.expression.Foldables; @@ -86,7 +87,9 @@ import org.elasticsearch.xpack.sql.optimizer.Optimizer.PropagateEquals; import org.elasticsearch.xpack.sql.optimizer.Optimizer.PruneDuplicateFunctions; import org.elasticsearch.xpack.sql.optimizer.Optimizer.ReplaceFoldableAttributes; import org.elasticsearch.xpack.sql.optimizer.Optimizer.ReplaceMinMaxWithTopHits; +import org.elasticsearch.xpack.sql.optimizer.Optimizer.SimplifyCase; import org.elasticsearch.xpack.sql.optimizer.Optimizer.SimplifyConditional; +import org.elasticsearch.xpack.sql.optimizer.Optimizer.SortAggregateOnOrderBy; import org.elasticsearch.xpack.sql.plan.logical.Aggregate; import org.elasticsearch.xpack.sql.plan.logical.Filter; import org.elasticsearch.xpack.sql.plan.logical.LocalRelation; @@ -112,13 +115,10 @@ import static java.util.Arrays.asList; import static java.util.Collections.emptyList; import static java.util.Collections.emptyMap; import static java.util.Collections.singletonList; -import static org.elasticsearch.xpack.sql.expression.Expression.TypeResolution; import static org.elasticsearch.xpack.sql.expression.Literal.FALSE; import static org.elasticsearch.xpack.sql.expression.Literal.NULL; import static org.elasticsearch.xpack.sql.expression.Literal.TRUE; import static org.elasticsearch.xpack.sql.expression.Literal.of; -import static org.elasticsearch.xpack.sql.optimizer.Optimizer.SimplifyCase; -import static org.elasticsearch.xpack.sql.optimizer.Optimizer.SortAggregateOnOrderBy; import static org.elasticsearch.xpack.sql.tree.Source.EMPTY; import static org.elasticsearch.xpack.sql.util.DateUtils.UTC; import static org.hamcrest.Matchers.contains; @@ -294,7 +294,7 @@ public class OptimizerTests extends ESTestCase { // check now with an alias result = new ConstantFolding().rule(new Alias(EMPTY, "a", exp)); assertEquals("a", Expressions.name(result)); - assertEquals(5, ((Literal) result).value()); + assertEquals(Alias.class, result.getClass()); } public void testConstantFoldingBinaryComparison() { diff --git 
a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryTranslatorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryTranslatorTests.java index 80d9202d5bf..2967ccb581f 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryTranslatorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryTranslatorTests.java @@ -279,7 +279,7 @@ public class QueryTranslatorTests extends ESTestCase { } assertEquals("date", rq.field()); - if (operator.contains("<") || operator.equals("=") || operator.equals("!=")) { + if (operator.contains("<") || operator.equals("=") || operator.equals("!=")) { assertEquals(DateFormatter.forPattern(pattern).format(now.withNano(DateUtils.getNanoPrecision(null, now.getNano()))), rq.upper()); } @@ -293,6 +293,22 @@ public class QueryTranslatorTests extends ESTestCase { assertEquals(pattern, rq.format()); } + public void testTranslateDateTrunc_WhereClause_Painless() { + LogicalPlan p = plan("SELECT int FROM test WHERE DATE_TRUNC('month', date) > '2018-09-04'::date"); + assertTrue(p instanceof Project); + assertTrue(p.children().get(0) instanceof Filter); + Expression condition = ((Filter) p.children().get(0)).condition(); + assertFalse(condition.foldable()); + QueryTranslation translation = QueryTranslator.toQuery(condition, false); + assertNull(translation.aggFilter); + assertTrue(translation.query instanceof ScriptQuery); + ScriptQuery sc = (ScriptQuery) translation.query; + assertEquals("InternalSqlScriptUtils.nullSafeFilter(InternalSqlScriptUtils.gt(InternalSqlScriptUtils.dateTrunc(" + + "params.v0,InternalSqlScriptUtils.docValue(doc,params.v1),params.v2),InternalSqlScriptUtils.asDateTime(params.v3)))", + sc.script().toString()); + assertEquals("[{v=month}, {v=date}, {v=Z}, {v=2018-09-04T00:00:00.000Z}]", sc.script().params().toString()); + } + public void testLikeOnInexact() { LogicalPlan p = plan("SELECT * FROM test WHERE some.string LIKE '%a%'"); assertTrue(p instanceof Project); @@ -1190,9 +1206,31 @@ public class QueryTranslatorTests extends ESTestCase { assertEquals(EsQueryExec.class, p.getClass()); EsQueryExec eqe = (EsQueryExec) p; assertThat(eqe.queryContainer().toString().replaceAll("\\s+", ""), containsString( - "{\"terms\":{\"script\":{\"source\":\"InternalSqlScriptUtils." + scriptMethods[pos] + "{\"terms\":{\"script\":{\"source\":\"InternalSqlScriptUtils." 
+ scriptMethods[pos] + "(InternalSqlScriptUtils.add(InternalSqlScriptUtils.docValue(doc,params.v0)," + "InternalSqlScriptUtils.intervalYearMonth(params.v1,params.v2)),params.v3)\",\"lang\":\"painless\"," + "\"params\":{\"v0\":\"date\",\"v1\":\"P1Y\",\"v2\":\"INTERVAL_YEAR\",\"v3\":\"Z\"}},\"missing_bucket\":true,")); } + + + public void testHavingWithLiteralImplicitGrouping() { + PhysicalPlan p = optimizeAndPlan("SELECT 1 FROM test HAVING COUNT(*) > 0"); + assertEquals(EsQueryExec.class, p.getClass()); + EsQueryExec eqe = (EsQueryExec) p; + assertTrue("Should be tracking hits", eqe.queryContainer().shouldTrackHits()); + assertEquals(1, eqe.output().size()); + String query = eqe.queryContainer().toString().replaceAll("\\s+", ""); + assertThat(eqe.queryContainer().toString().replaceAll("\\s+", ""), containsString("\"size\":0")); + } + + public void testHavingWithColumnImplicitGrouping() { + PhysicalPlan p = optimizeAndPlan("SELECT MAX(int) FROM test HAVING COUNT(*) > 0"); + assertEquals(EsQueryExec.class, p.getClass()); + EsQueryExec eqe = (EsQueryExec) p; + assertTrue("Should be tracking hits", eqe.queryContainer().shouldTrackHits()); + assertEquals(1, eqe.output().size()); + assertThat(eqe.queryContainer().toString().replaceAll("\\s+", ""), containsString( + "\"script\":{\"source\":\"InternalSqlScriptUtils.nullSafeFilter(InternalSqlScriptUtils.gt(params.a0,params.v0))\"," + + "\"lang\":\"painless\",\"params\":{\"v0\":0}}")); + } } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/DataTypeConversionTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/DataTypeConversionTests.java index 1e4a61265ec..ea5f9efc0be 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/DataTypeConversionTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/DataTypeConversionTests.java @@ -621,12 +621,15 @@ public class DataTypeConversionTests extends ESTestCase { assertEquals(NULL, commonType(NULL, NULL)); assertEquals(INTEGER, commonType(INTEGER, KEYWORD)); assertEquals(LONG, commonType(TEXT, LONG)); - assertNull(commonType(TEXT, KEYWORD)); assertEquals(SHORT, commonType(SHORT, BYTE)); assertEquals(FLOAT, commonType(BYTE, FLOAT)); assertEquals(FLOAT, commonType(FLOAT, INTEGER)); assertEquals(DOUBLE, commonType(DOUBLE, FLOAT)); + // strings + assertEquals(TEXT, commonType(TEXT, KEYWORD)); + assertEquals(TEXT, commonType(KEYWORD, TEXT)); + // numeric and intervals assertEquals(INTERVAL_YEAR_TO_MONTH, commonType(INTERVAL_YEAR_TO_MONTH, LONG)); assertEquals(INTERVAL_HOUR_TO_MINUTE, commonType(INTEGER, INTERVAL_HOUR_TO_MINUTE)); diff --git a/x-pack/plugin/src/test/java/org/elasticsearch/xpack/test/rest/XPackRestIT.java b/x-pack/plugin/src/test/java/org/elasticsearch/xpack/test/rest/XPackRestIT.java index ba63034c170..700d896f253 100644 --- a/x-pack/plugin/src/test/java/org/elasticsearch/xpack/test/rest/XPackRestIT.java +++ b/x-pack/plugin/src/test/java/org/elasticsearch/xpack/test/rest/XPackRestIT.java @@ -87,7 +87,10 @@ public class XPackRestIT extends ESClientYamlSuiteTestCase { private void waitForTemplates() throws Exception { if (installTemplates()) { List templates = new ArrayList<>(); - templates.addAll(Arrays.asList(AuditorField.NOTIFICATIONS_INDEX, MlMetaIndex.INDEX_NAME, + templates.addAll( + Arrays.asList( + AuditorField.NOTIFICATIONS_INDEX, + MlMetaIndex.INDEX_NAME, AnomalyDetectorsIndexFields.STATE_INDEX_PREFIX, AnomalyDetectorsIndex.jobResultsIndexPrefix(), AnomalyDetectorsIndex.configIndexName())); 
diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/data_frame.delete_data_frame_transform.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/data_frame.delete_data_frame_transform.json index 5c09d1b60b6..d8545728ab9 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/data_frame.delete_data_frame_transform.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/data_frame.delete_data_frame_transform.json @@ -1,7 +1,7 @@ { "data_frame.delete_data_frame_transform":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-data-frame-transform.html" + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/delete-transform.html" }, "stability":"beta", "url":{ diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/data_frame.get_data_frame_transform.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/data_frame.get_data_frame_transform.json index 812648c5fb1..e25a3301ec0 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/data_frame.get_data_frame_transform.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/data_frame.get_data_frame_transform.json @@ -1,7 +1,7 @@ { "data_frame.get_data_frame_transform":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/get-data-frame-transform.html" + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/get-transform.html" }, "stability":"beta", "url":{ diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/data_frame.get_data_frame_transform_stats.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/data_frame.get_data_frame_transform_stats.json index 61083d1fd78..57b004482a7 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/data_frame.get_data_frame_transform_stats.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/data_frame.get_data_frame_transform_stats.json @@ -1,7 +1,7 @@ { "data_frame.get_data_frame_transform_stats":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/get-data-frame-transform-stats.html" + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/get-transform-stats.html" }, "stability":"beta", "url":{ diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/data_frame.preview_data_frame_transform.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/data_frame.preview_data_frame_transform.json index a0d585c203a..e3f24448b9f 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/data_frame.preview_data_frame_transform.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/data_frame.preview_data_frame_transform.json @@ -1,7 +1,7 @@ { "data_frame.preview_data_frame_transform":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/preview-data-frame-transform.html" + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/preview-transform.html" }, "stability":"beta", "url":{ diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/data_frame.put_data_frame_transform.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/data_frame.put_data_frame_transform.json index b4996d22861..5e7354f435d 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/data_frame.put_data_frame_transform.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/data_frame.put_data_frame_transform.json @@ -1,7 +1,7 @@ { "data_frame.put_data_frame_transform":{ "documentation":{ - 
"url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/put-data-frame-transform.html" + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/put-transform.html" }, "stability":"beta", "url":{ diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/data_frame.start_data_frame_transform.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/data_frame.start_data_frame_transform.json index b2e4997ebbe..c0d701be562 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/data_frame.start_data_frame_transform.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/data_frame.start_data_frame_transform.json @@ -1,7 +1,7 @@ { "data_frame.start_data_frame_transform":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/start-data-frame-transform.html" + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/start-transform.html" }, "stability":"beta", "url":{ diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/data_frame.stop_data_frame_transform.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/data_frame.stop_data_frame_transform.json index af0c35b1563..c88a7176309 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/data_frame.stop_data_frame_transform.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/data_frame.stop_data_frame_transform.json @@ -1,7 +1,7 @@ { "data_frame.stop_data_frame_transform":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/stop-data-frame-transform.html" + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/stop-transform.html" }, "stability":"beta", "url":{ diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/data_frame.update_data_frame_transform.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/data_frame.update_data_frame_transform.json index 5dcf2777965..70d1342815e 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/data_frame.update_data_frame_transform.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/data_frame.update_data_frame_transform.json @@ -1,7 +1,7 @@ { "data_frame.update_data_frame_transform": { "documentation": { - "url": "https://www.elastic.co/guide/en/elasticsearch/reference/current/update-data-frame-transform.html" + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/current/update-transform.html" }, "stability": "beta", "url": { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/slm.delete_lifecycle.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/slm.delete_lifecycle.json index 72b30f586a8..8f18b5fcdad 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/slm.delete_lifecycle.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/slm.delete_lifecycle.json @@ -1,7 +1,7 @@ { "slm.delete_lifecycle":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api.html" + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-delete.html" }, "stability":"stable", "url":{ @@ -10,7 +10,13 @@ "path":"/_slm/policy/{policy_id}", "methods":[ "DELETE" - ] + ], + "parts":{ + "policy_id":{ + "type":"string", + "description":"The id of the snapshot lifecycle policy to remove" + } + } } ] }, diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/slm.execute_lifecycle.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/slm.execute_lifecycle.json index 0f0cda611cb..6353fac6391 
100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/slm.execute_lifecycle.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/slm.execute_lifecycle.json @@ -1,7 +1,7 @@ { "slm.execute_lifecycle":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api.html" + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-execute.html" }, "stability":"stable", "url":{ diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/slm.get_lifecycle.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/slm.get_lifecycle.json index 0a3da410733..79d9cf5ece6 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/slm.get_lifecycle.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/slm.get_lifecycle.json @@ -1,7 +1,7 @@ { "slm.get_lifecycle":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api.html" + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-get.html" }, "stability":"stable", "url":{ diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/slm.get_stats.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/slm.get_stats.json new file mode 100644 index 00000000000..233d302baee --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/slm.get_stats.json @@ -0,0 +1,19 @@ +{ + "slm.get_stats":{ + "documentation":{ + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api.html" + }, + "stability":"stable", + "url":{ + "paths":[ + { + "path":"/_slm/stats", + "methods":[ + "GET" + ] + } + ] + }, + "params":{} + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/slm.put_lifecycle.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/slm.put_lifecycle.json index f4bb1062575..4028d924c3d 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/slm.put_lifecycle.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/slm.put_lifecycle.json @@ -1,7 +1,7 @@ { "slm.put_lifecycle":{ "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api.html" + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/slm-api-put.html" }, "stability":"stable", "url":{ diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_crud.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_crud.yml index 93b69bbc3e2..57260996bae 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_crud.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_crud.yml @@ -622,7 +622,7 @@ setup: - match: { transforms.0.state: "/started|indexing/" } - do: - catch: /Cannot delete data frame \[airline-transform-start-delete\] as the task is running/ + catch: /Cannot delete transform \[airline-transform-start-delete\] as the task is running/ data_frame.delete_data_frame_transform: transform_id: "airline-transform-start-delete" diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_start_stop.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_start_stop.yml index 044f5212a99..54805ffd7e7 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_start_stop.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_start_stop.yml @@ -5,6 +5,9 @@ setup: body: mappings: properties: + time_alias: + type: alias + 
path: time time: type: date airline: @@ -59,7 +62,7 @@ teardown: - match: { acknowledged: true } - do: - catch: /Unable to start data frame transform \[airline-transform-start-stop\] as it is in state \[STARTED\]/ + catch: /Cannot start transform \[airline-transform-start-stop\] as it is already started/ data_frame.start_data_frame_transform: transform_id: "airline-transform-start-stop" @@ -322,3 +325,27 @@ teardown: - do: data_frame.delete_data_frame_transform: transform_id: "airline-transform-stop-all" +--- +"Test start/stop with field alias": + - do: + data_frame.put_data_frame_transform: + transform_id: "airline_via_field_alias" + body: > + { + "source": {"index": "airline-data"}, + "dest": {"index": "airline-data-time-alias"}, + "pivot": { + "group_by": {"time": {"date_histogram": {"field": "time_alias", "calendar_interval": "1m"}}}, + "aggs": {"avg_response": {"avg": {"field": "responsetime"}}} + } + } + - do: + data_frame.start_data_frame_transform: + transform_id: "airline_via_field_alias" + - match: { acknowledged: true } + + - do: + indices.get_mapping: + index: airline-data-time-alias + - match: { airline-data-time-alias.mappings.properties.time.type: date } + - match: { airline-data-time-alias.mappings.properties.avg_response.type: double } diff --git a/x-pack/plugin/data-frame/build.gradle b/x-pack/plugin/transform/build.gradle similarity index 87% rename from x-pack/plugin/data-frame/build.gradle rename to x-pack/plugin/transform/build.gradle index 03c89994e97..28824750480 100644 --- a/x-pack/plugin/data-frame/build.gradle +++ b/x-pack/plugin/transform/build.gradle @@ -2,9 +2,9 @@ evaluationDependsOn(xpackModule('core')) apply plugin: 'elasticsearch.esplugin' esplugin { - name 'data-frame' - description 'A plugin to build data frames' - classname 'org.elasticsearch.xpack.dataframe.DataFrame' + name 'transform' + description 'A plugin to transform data' + classname 'org.elasticsearch.xpack.transform.Transform' extendedPlugins = ['x-pack-core'] } diff --git a/x-pack/plugin/data-frame/qa/build.gradle b/x-pack/plugin/transform/qa/build.gradle similarity index 100% rename from x-pack/plugin/data-frame/qa/build.gradle rename to x-pack/plugin/transform/qa/build.gradle diff --git a/x-pack/plugin/data-frame/qa/multi-node-tests/build.gradle b/x-pack/plugin/transform/qa/multi-node-tests/build.gradle similarity index 96% rename from x-pack/plugin/data-frame/qa/multi-node-tests/build.gradle rename to x-pack/plugin/transform/qa/multi-node-tests/build.gradle index fe394adfc97..cec942efa84 100644 --- a/x-pack/plugin/data-frame/qa/multi-node-tests/build.gradle +++ b/x-pack/plugin/transform/qa/multi-node-tests/build.gradle @@ -5,7 +5,7 @@ apply plugin: 'elasticsearch.rest-test' dependencies { testCompile project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') - testCompile project(path: xpackModule('data-frame'), configuration: 'runtime') + testCompile project(path: xpackModule('transform'), configuration: 'runtime') testCompile project(':client:rest-high-level') } diff --git a/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformIT.java b/x-pack/plugin/transform/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/transform/integration/TransformIT.java similarity index 78% rename from x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformIT.java rename to 
x-pack/plugin/transform/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/transform/integration/TransformIT.java index b239d461b1c..d394636507c 100644 --- a/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformIT.java +++ b/x-pack/plugin/transform/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/transform/integration/TransformIT.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.dataframe.integration; +package org.elasticsearch.xpack.transform.integration; import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; @@ -16,13 +16,13 @@ import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.RestHighLevelClient; -import org.elasticsearch.client.dataframe.transforms.DataFrameTransformConfig; -import org.elasticsearch.client.dataframe.transforms.DataFrameTransformConfigUpdate; -import org.elasticsearch.client.dataframe.transforms.DataFrameTransformStats; -import org.elasticsearch.client.dataframe.transforms.DestConfig; -import org.elasticsearch.client.dataframe.transforms.TimeSyncConfig; -import org.elasticsearch.client.dataframe.transforms.pivot.SingleGroupSource; -import org.elasticsearch.client.dataframe.transforms.pivot.TermsGroupSource; +import org.elasticsearch.client.transform.transforms.DataFrameTransformConfig; +import org.elasticsearch.client.transform.transforms.DataFrameTransformConfigUpdate; +import org.elasticsearch.client.transform.transforms.DataFrameTransformStats; +import org.elasticsearch.client.transform.transforms.DestConfig; +import org.elasticsearch.client.transform.transforms.TimeSyncConfig; +import org.elasticsearch.client.transform.transforms.pivot.SingleGroupSource; +import org.elasticsearch.client.transform.transforms.pivot.TermsGroupSource; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -45,14 +45,14 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.oneOf; -public class DataFrameTransformIT extends DataFrameIntegTestCase { +public class TransformIT extends TransformIntegTestCase { @After public void cleanTransforms() throws IOException { cleanUp(); } - public void testDataFrameTransformCrud() throws Exception { + public void testTransformCrud() throws Exception { String indexName = "basic-crud-reviews"; createReviewsIndex(indexName, 100); @@ -65,27 +65,27 @@ public class DataFrameTransformIT extends DataFrameIntegTestCase { .addAggregator(AggregationBuilders.avg("review_score").field("stars")) .addAggregator(AggregationBuilders.max("timestamp").field("timestamp")); - DataFrameTransformConfig config = createTransformConfig("data-frame-transform-crud", + DataFrameTransformConfig config = createTransformConfig("transform-crud", groups, aggs, "reviews-by-user-business-day", indexName); - assertTrue(putDataFrameTransform(config, RequestOptions.DEFAULT).isAcknowledged()); - assertTrue(startDataFrameTransform(config.getId(), RequestOptions.DEFAULT).isAcknowledged()); + assertTrue(putTransform(config, RequestOptions.DEFAULT).isAcknowledged()); + assertTrue(startTransform(config.getId(), RequestOptions.DEFAULT).isAcknowledged()); 
waitUntilCheckpoint(config.getId(), 1L); - stopDataFrameTransform(config.getId()); + stopTransform(config.getId()); - DataFrameTransformConfig storedConfig = getDataFrameTransform(config.getId()).getTransformConfigurations().get(0); + DataFrameTransformConfig storedConfig = getTransform(config.getId()).getTransformConfigurations().get(0); assertThat(storedConfig.getVersion(), equalTo(Version.CURRENT)); Instant now = Instant.now(); assertTrue("[create_time] is not before current time", storedConfig.getCreateTime().isBefore(now)); - deleteDataFrameTransform(config.getId()); + deleteTransform(config.getId()); } - public void testContinuousDataFrameTransformCrud() throws Exception { + public void testContinuousTransformCrud() throws Exception { String indexName = "continuous-crud-reviews"; createReviewsIndex(indexName, 100); @@ -98,7 +98,7 @@ public class DataFrameTransformIT extends DataFrameIntegTestCase { .addAggregator(AggregationBuilders.avg("review_score").field("stars")) .addAggregator(AggregationBuilders.max("timestamp").field("timestamp")); - DataFrameTransformConfig config = createTransformConfigBuilder("data-frame-transform-crud", + DataFrameTransformConfig config = createTransformConfigBuilder("transform-crud", groups, aggs, "reviews-by-user-business-day", @@ -107,20 +107,20 @@ public class DataFrameTransformIT extends DataFrameIntegTestCase { .setSyncConfig(new TimeSyncConfig("timestamp", TimeValue.timeValueSeconds(1))) .build(); - assertTrue(putDataFrameTransform(config, RequestOptions.DEFAULT).isAcknowledged()); - assertTrue(startDataFrameTransform(config.getId(), RequestOptions.DEFAULT).isAcknowledged()); + assertTrue(putTransform(config, RequestOptions.DEFAULT).isAcknowledged()); + assertTrue(startTransform(config.getId(), RequestOptions.DEFAULT).isAcknowledged()); waitUntilCheckpoint(config.getId(), 1L); - assertThat(getDataFrameTransformStats(config.getId()).getTransformsStats().get(0).getState(), + assertThat(getTransformStats(config.getId()).getTransformsStats().get(0).getState(), equalTo(DataFrameTransformStats.State.STARTED)); - long docsIndexed = getDataFrameTransformStats(config.getId()) + long docsIndexed = getTransformStats(config.getId()) .getTransformsStats() .get(0) .getIndexerStats() .getNumDocuments(); - DataFrameTransformConfig storedConfig = getDataFrameTransform(config.getId()).getTransformConfigurations().get(0); + DataFrameTransformConfig storedConfig = getTransform(config.getId()).getTransformConfigurations().get(0); assertThat(storedConfig.getVersion(), equalTo(Version.CURRENT)); Instant now = Instant.now(); assertTrue("[create_time] is not before current time", storedConfig.getCreateTime().isBefore(now)); @@ -132,17 +132,17 @@ public class DataFrameTransformIT extends DataFrameIntegTestCase { waitUntilCheckpoint(config.getId(), 2L); // Assert that we wrote the new docs - assertThat(getDataFrameTransformStats(config.getId()) + assertThat(getTransformStats(config.getId()) .getTransformsStats() .get(0) .getIndexerStats() .getNumDocuments(), greaterThan(docsIndexed)); - stopDataFrameTransform(config.getId()); - deleteDataFrameTransform(config.getId()); + stopTransform(config.getId()); + deleteTransform(config.getId()); } - public void testContinuousDataFrameTransformUpdate() throws Exception { + public void testContinuousTransformUpdate() throws Exception { String indexName = "continuous-reviews-update"; createReviewsIndex(indexName, 10); @@ -153,7 +153,7 @@ public class DataFrameTransformIT extends DataFrameIntegTestCase { 
.addAggregator(AggregationBuilders.avg("review_score").field("stars")) .addAggregator(AggregationBuilders.max("timestamp").field("timestamp")); - String id = "data-frame-transform-to-update"; + String id = "transform-to-update"; String dest = "reviews-by-user-business-day-to-update"; DataFrameTransformConfig config = createTransformConfigBuilder(id, groups, @@ -164,20 +164,20 @@ public class DataFrameTransformIT extends DataFrameIntegTestCase { .setSyncConfig(new TimeSyncConfig("timestamp", TimeValue.timeValueSeconds(1))) .build(); - assertTrue(putDataFrameTransform(config, RequestOptions.DEFAULT).isAcknowledged()); - assertTrue(startDataFrameTransform(config.getId(), RequestOptions.DEFAULT).isAcknowledged()); + assertTrue(putTransform(config, RequestOptions.DEFAULT).isAcknowledged()); + assertTrue(startTransform(config.getId(), RequestOptions.DEFAULT).isAcknowledged()); waitUntilCheckpoint(config.getId(), 1L); - assertThat(getDataFrameTransformStats(config.getId()).getTransformsStats().get(0).getState(), + assertThat(getTransformStats(config.getId()).getTransformsStats().get(0).getState(), oneOf(DataFrameTransformStats.State.STARTED, DataFrameTransformStats.State.INDEXING)); - long docsIndexed = getDataFrameTransformStats(config.getId()) + long docsIndexed = getTransformStats(config.getId()) .getTransformsStats() .get(0) .getIndexerStats() .getNumDocuments(); - DataFrameTransformConfig storedConfig = getDataFrameTransform(config.getId()).getTransformConfigurations().get(0); + DataFrameTransformConfig storedConfig = getTransform(config.getId()).getTransformConfigurations().get(0); assertThat(storedConfig.getVersion(), equalTo(Version.CURRENT)); Instant now = Instant.now(); assertTrue("[create_time] is not before current time", storedConfig.getCreateTime().isBefore(now)); @@ -212,7 +212,7 @@ public class DataFrameTransformIT extends DataFrameIntegTestCase { // Since updates are loaded on checkpoint start, we should see the updated config on this next run waitUntilCheckpoint(config.getId(), 2L); - long numDocsAfterCp2 = getDataFrameTransformStats(config.getId()) + long numDocsAfterCp2 = getTransformStats(config.getId()) .getTransformsStats() .get(0) .getIndexerStats() @@ -231,8 +231,8 @@ public class DataFrameTransformIT extends DataFrameIntegTestCase { hlrc.indices().refresh(new RefreshRequest(dest), RequestOptions.DEFAULT); }, 30, TimeUnit.SECONDS); - stopDataFrameTransform(config.getId()); - deleteDataFrameTransform(config.getId()); + stopTransform(config.getId()); + deleteTransform(config.getId()); } private void indexMoreDocs(long timestamp, long userId, String index) throws Exception { diff --git a/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameIntegTestCase.java b/x-pack/plugin/transform/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/transform/integration/TransformIntegTestCase.java similarity index 86% rename from x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameIntegTestCase.java rename to x-pack/plugin/transform/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/transform/integration/TransformIntegTestCase.java index 4c6723de29c..d1cde296664 100644 --- a/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameIntegTestCase.java +++ b/x-pack/plugin/transform/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/transform/integration/TransformIntegTestCase.java @@ -4,7 +4,7 
@@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.dataframe.integration; +package org.elasticsearch.xpack.transform.integration; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; @@ -14,29 +14,29 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.RestHighLevelClient; import org.elasticsearch.client.core.AcknowledgedResponse; -import org.elasticsearch.client.dataframe.DeleteDataFrameTransformRequest; -import org.elasticsearch.client.dataframe.GetDataFrameTransformRequest; -import org.elasticsearch.client.dataframe.GetDataFrameTransformResponse; -import org.elasticsearch.client.dataframe.GetDataFrameTransformStatsRequest; -import org.elasticsearch.client.dataframe.GetDataFrameTransformStatsResponse; -import org.elasticsearch.client.dataframe.PutDataFrameTransformRequest; -import org.elasticsearch.client.dataframe.StartDataFrameTransformRequest; -import org.elasticsearch.client.dataframe.StartDataFrameTransformResponse; -import org.elasticsearch.client.dataframe.StopDataFrameTransformRequest; -import org.elasticsearch.client.dataframe.StopDataFrameTransformResponse; -import org.elasticsearch.client.dataframe.UpdateDataFrameTransformRequest; -import org.elasticsearch.client.dataframe.transforms.DataFrameTransformConfig; -import org.elasticsearch.client.dataframe.transforms.DataFrameTransformConfigUpdate; -import org.elasticsearch.client.dataframe.transforms.DestConfig; -import org.elasticsearch.client.dataframe.transforms.QueryConfig; -import org.elasticsearch.client.dataframe.transforms.SourceConfig; -import org.elasticsearch.client.dataframe.transforms.pivot.AggregationConfig; -import org.elasticsearch.client.dataframe.transforms.pivot.DateHistogramGroupSource; -import org.elasticsearch.client.dataframe.transforms.pivot.GroupConfig; -import org.elasticsearch.client.dataframe.transforms.pivot.PivotConfig; -import org.elasticsearch.client.dataframe.transforms.pivot.SingleGroupSource; import org.elasticsearch.client.indices.CreateIndexRequest; import org.elasticsearch.client.indices.CreateIndexResponse; +import org.elasticsearch.client.transform.DeleteDataFrameTransformRequest; +import org.elasticsearch.client.transform.GetDataFrameTransformRequest; +import org.elasticsearch.client.transform.GetDataFrameTransformResponse; +import org.elasticsearch.client.transform.GetDataFrameTransformStatsRequest; +import org.elasticsearch.client.transform.GetDataFrameTransformStatsResponse; +import org.elasticsearch.client.transform.PutDataFrameTransformRequest; +import org.elasticsearch.client.transform.StartDataFrameTransformRequest; +import org.elasticsearch.client.transform.StartDataFrameTransformResponse; +import org.elasticsearch.client.transform.StopDataFrameTransformRequest; +import org.elasticsearch.client.transform.StopDataFrameTransformResponse; +import org.elasticsearch.client.transform.UpdateDataFrameTransformRequest; +import org.elasticsearch.client.transform.transforms.DataFrameTransformConfig; +import org.elasticsearch.client.transform.transforms.DataFrameTransformConfigUpdate; +import org.elasticsearch.client.transform.transforms.DestConfig; +import org.elasticsearch.client.transform.transforms.QueryConfig; +import org.elasticsearch.client.transform.transforms.SourceConfig; +import 
org.elasticsearch.client.transform.transforms.pivot.AggregationConfig; +import org.elasticsearch.client.transform.transforms.pivot.DateHistogramGroupSource; +import org.elasticsearch.client.transform.transforms.pivot.GroupConfig; +import org.elasticsearch.client.transform.transforms.pivot.PivotConfig; +import org.elasticsearch.client.transform.transforms.pivot.SingleGroupSource; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; @@ -68,7 +68,7 @@ import java.util.concurrent.TimeUnit; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.core.Is.is; -abstract class DataFrameIntegTestCase extends ESRestTestCase { +abstract class TransformIntegTestCase extends ESRestTestCase { private Map transformConfigs = new HashMap<>(); @@ -79,23 +79,23 @@ abstract class DataFrameIntegTestCase extends ESRestTestCase { protected void cleanUpTransforms() throws IOException { for (DataFrameTransformConfig config : transformConfigs.values()) { - stopDataFrameTransform(config.getId()); - deleteDataFrameTransform(config.getId()); + stopTransform(config.getId()); + deleteTransform(config.getId()); } transformConfigs.clear(); } - protected StopDataFrameTransformResponse stopDataFrameTransform(String id) throws IOException { + protected StopDataFrameTransformResponse stopTransform(String id) throws IOException { RestHighLevelClient restClient = new TestRestHighLevelClient(); return restClient.dataFrame().stopDataFrameTransform(new StopDataFrameTransformRequest(id, true, null), RequestOptions.DEFAULT); } - protected StartDataFrameTransformResponse startDataFrameTransform(String id, RequestOptions options) throws IOException { + protected StartDataFrameTransformResponse startTransform(String id, RequestOptions options) throws IOException { RestHighLevelClient restClient = new TestRestHighLevelClient(); return restClient.dataFrame().startDataFrameTransform(new StartDataFrameTransformRequest(id), options); } - protected AcknowledgedResponse deleteDataFrameTransform(String id) throws IOException { + protected AcknowledgedResponse deleteTransform(String id) throws IOException { RestHighLevelClient restClient = new TestRestHighLevelClient(); AcknowledgedResponse response = restClient.dataFrame().deleteDataFrameTransform(new DeleteDataFrameTransformRequest(id), RequestOptions.DEFAULT); @@ -105,9 +105,9 @@ abstract class DataFrameIntegTestCase extends ESRestTestCase { return response; } - protected AcknowledgedResponse putDataFrameTransform(DataFrameTransformConfig config, RequestOptions options) throws IOException { + protected AcknowledgedResponse putTransform(DataFrameTransformConfig config, RequestOptions options) throws IOException { if (transformConfigs.keySet().contains(config.getId())) { - throw new IllegalArgumentException("data frame transform [" + config.getId() + "] is already registered"); + throw new IllegalArgumentException("transform [" + config.getId() + "] is already registered"); } RestHighLevelClient restClient = new TestRestHighLevelClient(); AcknowledgedResponse response = @@ -118,12 +118,12 @@ abstract class DataFrameIntegTestCase extends ESRestTestCase { return response; } - protected GetDataFrameTransformStatsResponse getDataFrameTransformStats(String id) throws IOException { + protected GetDataFrameTransformStatsResponse getTransformStats(String id) throws IOException { RestHighLevelClient restClient = new TestRestHighLevelClient(); return 
restClient.dataFrame().getDataFrameTransformStats(new GetDataFrameTransformStatsRequest(id), RequestOptions.DEFAULT); } - protected GetDataFrameTransformResponse getDataFrameTransform(String id) throws IOException { + protected GetDataFrameTransformResponse getTransform(String id) throws IOException { RestHighLevelClient restClient = new TestRestHighLevelClient(); return restClient.dataFrame().getDataFrameTransform(new GetDataFrameTransformRequest(id), RequestOptions.DEFAULT); } @@ -134,7 +134,7 @@ abstract class DataFrameIntegTestCase extends ESRestTestCase { protected void waitUntilCheckpoint(String id, long checkpoint, TimeValue waitTime) throws Exception { assertBusy(() -> - assertEquals(checkpoint, getDataFrameTransformStats(id) + assertEquals(checkpoint, getTransformStats(id) .getTransformsStats() .get(0) .getCheckpointingInfo() @@ -215,7 +215,7 @@ abstract class DataFrameIntegTestCase extends ESRestTestCase { .setDest(DestConfig.builder().setIndex(destinationIndex).build()) .setFrequency(TimeValue.timeValueSeconds(10)) .setPivotConfig(createPivotConfig(groups, aggregations)) - .setDescription("Test data frame transform config id: " + id); + .setDescription("Test transform config id: " + id); } protected DataFrameTransformConfig createTransformConfig(String id, diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/build.gradle b/x-pack/plugin/transform/qa/single-node-tests/build.gradle similarity index 87% rename from x-pack/plugin/data-frame/qa/single-node-tests/build.gradle rename to x-pack/plugin/transform/qa/single-node-tests/build.gradle index 3100c2002c5..95cb164ff9a 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/build.gradle +++ b/x-pack/plugin/transform/qa/single-node-tests/build.gradle @@ -5,7 +5,7 @@ apply plugin: 'elasticsearch.rest-test' dependencies { testCompile project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') - testCompile project(path: xpackModule('data-frame'), configuration: 'runtime') + testCompile project(path: xpackModule('transform'), configuration: 'runtime') testCompile project(':client:rest-high-level') } @@ -14,4 +14,4 @@ testClusters.integTest { setting 'xpack.security.enabled', 'true' setting 'xpack.license.self_generated.type', 'trial' user username: "x_pack_rest_user", password: "x-pack-test-password" -} \ No newline at end of file +} diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameAuditorIT.java b/x-pack/plugin/transform/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/transform/integration/TransformAuditorIT.java similarity index 78% rename from x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameAuditorIT.java rename to x-pack/plugin/transform/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/transform/integration/TransformAuditorIT.java index b7aaa5c567c..3c433c48bdd 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameAuditorIT.java +++ b/x-pack/plugin/transform/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/transform/integration/TransformAuditorIT.java @@ -4,10 +4,10 @@ * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.dataframe.integration; +package org.elasticsearch.xpack.transform.integration; import org.elasticsearch.client.Request; -import org.elasticsearch.xpack.dataframe.persistence.DataFrameInternalIndex; +import org.elasticsearch.xpack.transform.persistence.TransformInternalIndex; import org.junit.Before; import java.io.IOException; @@ -22,11 +22,11 @@ import static org.hamcrest.CoreMatchers.notNullValue; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.is; -public class DataFrameAuditorIT extends DataFrameRestTestCase { +public class TransformAuditorIT extends TransformRestTestCase { - private static final String TEST_USER_NAME = "df_admin_plus_data"; + private static final String TEST_USER_NAME = "transform_admin_plus_data"; private static final String DATA_ACCESS_ROLE = "test_data_access"; - private static final String BASIC_AUTH_VALUE_DATA_FRAME_ADMIN_WITH_SOME_DATA_ACCESS = + private static final String BASIC_AUTH_VALUE_TRANSFORM_ADMIN_WITH_SOME_DATA_ACCESS = basicAuthHeaderValue(TEST_USER_NAME, TEST_PASSWORD_SECURE_STRING); private static boolean indicesCreated = false; @@ -54,24 +54,24 @@ public class DataFrameAuditorIT extends DataFrameRestTestCase { @SuppressWarnings("unchecked") public void testAuditorWritesAudits() throws Exception { String transformId = "simple_pivot_for_audit"; - String dataFrameIndex = "pivot_reviews_user_id_above_20"; - setupDataAccessRole(DATA_ACCESS_ROLE, REVIEWS_INDEX_NAME, dataFrameIndex); + String transformIndex = "pivot_reviews_user_id_above_20"; + setupDataAccessRole(DATA_ACCESS_ROLE, REVIEWS_INDEX_NAME, transformIndex); String query = "\"match\": {\"user_id\": \"user_26\"}"; - createPivotReviewsTransform(transformId, dataFrameIndex, query, null, BASIC_AUTH_VALUE_DATA_FRAME_ADMIN_WITH_SOME_DATA_ACCESS); + createPivotReviewsTransform(transformId, transformIndex, query, null, BASIC_AUTH_VALUE_TRANSFORM_ADMIN_WITH_SOME_DATA_ACCESS); - startAndWaitForTransform(transformId, dataFrameIndex, BASIC_AUTH_VALUE_DATA_FRAME_ADMIN_WITH_SOME_DATA_ACCESS); + startAndWaitForTransform(transformId, transformIndex, BASIC_AUTH_VALUE_TRANSFORM_ADMIN_WITH_SOME_DATA_ACCESS); // Make sure we wrote to the audit - final Request request = new Request("GET", DataFrameInternalIndex.AUDIT_INDEX + "/_search"); + final Request request = new Request("GET", TransformInternalIndex.AUDIT_INDEX + "/_search"); request.setJsonEntity("{\"query\":{\"term\":{\"transform_id\":\"simple_pivot_for_audit\"}}}"); assertBusy(() -> { - assertTrue(indexExists(DataFrameInternalIndex.AUDIT_INDEX)); + assertTrue(indexExists(TransformInternalIndex.AUDIT_INDEX)); }); // Since calls to write the AbstractAuditor are sent and forgot (async) we could have returned from the start, // finished the job (as this is a very short DF job), all without the audit being fully written. 
assertBusy(() -> { - refreshIndex(DataFrameInternalIndex.AUDIT_INDEX); + refreshIndex(TransformInternalIndex.AUDIT_INDEX); Map response = entityAsMap(client().performRequest(request)); List hitList = ((List) ((Map)response.get("hits")).get("hits")); assertThat(hitList, is(not(empty()))); diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameConfigurationIndexIT.java b/x-pack/plugin/transform/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/transform/integration/TransformConfigurationIndexIT.java similarity index 72% rename from x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameConfigurationIndexIT.java rename to x-pack/plugin/transform/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/transform/integration/TransformConfigurationIndexIT.java index 499f62f13ea..ad360c65816 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameConfigurationIndexIT.java +++ b/x-pack/plugin/transform/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/transform/integration/TransformConfigurationIndexIT.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.dataframe.integration; +package org.elasticsearch.xpack.transform.integration; import org.apache.http.entity.ContentType; import org.apache.http.entity.StringEntity; @@ -14,21 +14,21 @@ import org.elasticsearch.client.ResponseException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.support.XContentMapValues; -import org.elasticsearch.xpack.core.dataframe.DataFrameField; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; -import org.elasticsearch.xpack.dataframe.persistence.DataFrameInternalIndex; +import org.elasticsearch.xpack.core.transform.TransformField; +import org.elasticsearch.xpack.core.transform.transforms.TransformConfig; +import org.elasticsearch.xpack.transform.persistence.TransformInternalIndex; import java.io.IOException; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -public class DataFrameConfigurationIndexIT extends DataFrameRestTestCase { +public class TransformConfigurationIndexIT extends TransformRestTestCase { /** * Tests the corner case that for some reason a transform configuration still exists in the index but * the persistent task disappeared * - * test note: {@link DataFrameRestTestCase} checks for an empty index as part of the test case cleanup, + * test note: {@link TransformRestTestCase} checks for an empty index as part of the test case cleanup, * so we do not need to check that the document has been deleted in this place */ public void testDeleteConfigurationLeftOver() throws IOException { @@ -37,20 +37,20 @@ public class DataFrameConfigurationIndexIT extends DataFrameRestTestCase { try (XContentBuilder builder = jsonBuilder()) { builder.startObject(); { - builder.field(DataFrameField.ID.getPreferredName(), fakeTransformName); + builder.field(TransformField.ID.getPreferredName(), fakeTransformName); } builder.endObject(); final StringEntity entity = new StringEntity(Strings.toString(builder), ContentType.APPLICATION_JSON); Request req = new Request("PUT", - DataFrameInternalIndex.LATEST_INDEX_NAME + "/_doc/" + 
DataFrameTransformConfig.documentId(fakeTransformName)); + TransformInternalIndex.LATEST_INDEX_NAME + "/_doc/" + TransformConfig.documentId(fakeTransformName)); req.setEntity(entity); client().performRequest(req); } // refresh the index - assertOK(client().performRequest(new Request("POST", DataFrameInternalIndex.LATEST_INDEX_NAME + "/_refresh"))); + assertOK(client().performRequest(new Request("POST", TransformInternalIndex.LATEST_INDEX_NAME + "/_refresh"))); - Request deleteRequest = new Request("DELETE", DATAFRAME_ENDPOINT + fakeTransformName); + Request deleteRequest = new Request("DELETE", TRANSFORM_ENDPOINT + fakeTransformName); Response deleteResponse = client().performRequest(deleteRequest); assertOK(deleteResponse); assertTrue((boolean)XContentMapValues.extractValue("acknowledged", entityAsMap(deleteResponse))); diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameGetAndGetStatsIT.java b/x-pack/plugin/transform/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/transform/integration/TransformGetAndGetStatsIT.java similarity index 84% rename from x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameGetAndGetStatsIT.java rename to x-pack/plugin/transform/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/transform/integration/TransformGetAndGetStatsIT.java index 3a7809125c7..7715086346b 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameGetAndGetStatsIT.java +++ b/x-pack/plugin/transform/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/transform/integration/TransformGetAndGetStatsIT.java @@ -4,11 +4,11 @@ * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.dataframe.integration; +package org.elasticsearch.xpack.transform.integration; import org.elasticsearch.client.Request; import org.elasticsearch.common.xcontent.support.XContentMapValues; -import org.elasticsearch.xpack.core.dataframe.DataFrameField; +import org.elasticsearch.xpack.core.transform.TransformField; import org.junit.After; import org.junit.Before; @@ -26,13 +26,13 @@ import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.oneOf; -public class DataFrameGetAndGetStatsIT extends DataFrameRestTestCase { +public class TransformGetAndGetStatsIT extends TransformRestTestCase { - private static final String TEST_USER_NAME = "df_user"; - private static final String BASIC_AUTH_VALUE_DATA_FRAME_USER = + private static final String TEST_USER_NAME = "transform_user"; + private static final String BASIC_AUTH_VALUE_TRANSFORM_USER = basicAuthHeaderValue(TEST_USER_NAME, TEST_PASSWORD_SECURE_STRING); - private static final String TEST_ADMIN_USER_NAME = "df_admin"; - private static final String BASIC_AUTH_VALUE_DATA_FRAME_ADMIN = + private static final String TEST_ADMIN_USER_NAME = "transform_admin"; + private static final String BASIC_AUTH_VALUE_TRANSFORM_ADMIN = basicAuthHeaderValue(TEST_ADMIN_USER_NAME, TEST_PASSWORD_SECURE_STRING); private static boolean indicesCreated = false; @@ -59,7 +59,7 @@ public class DataFrameGetAndGetStatsIT extends DataFrameRestTestCase { @After public void clearOutTransforms() throws Exception { - wipeDataFrameTransforms(); + wipeTransforms(); } @SuppressWarnings("unchecked") @@ -71,26 +71,26 @@ public class DataFrameGetAndGetStatsIT extends DataFrameRestTestCase { startAndWaitForTransform("pivot_1", "pivot_reviews_1"); startAndWaitForTransform("pivot_2", "pivot_reviews_2"); startAndWaitForContinuousTransform("pivot_continuous", "pivot_reviews_continuous", null); - stopDataFrameTransform("pivot_1", false); - stopDataFrameTransform("pivot_2", false); + stopTransform("pivot_1", false); + stopTransform("pivot_2", false); // Alternate testing between admin and lowly user, as both should be able to get the configs and stats - String authHeader = randomFrom(BASIC_AUTH_VALUE_DATA_FRAME_USER, BASIC_AUTH_VALUE_DATA_FRAME_ADMIN); + String authHeader = randomFrom(BASIC_AUTH_VALUE_TRANSFORM_USER, BASIC_AUTH_VALUE_TRANSFORM_ADMIN); // check all the different ways to retrieve all stats - Request getRequest = createRequestWithAuth("GET", DATAFRAME_ENDPOINT + "_stats", authHeader); + Request getRequest = createRequestWithAuth("GET", TRANSFORM_ENDPOINT + "_stats", authHeader); Map stats = entityAsMap(client().performRequest(getRequest)); assertEquals(3, XContentMapValues.extractValue("count", stats)); - getRequest = createRequestWithAuth("GET", DATAFRAME_ENDPOINT + "_all/_stats", authHeader); + getRequest = createRequestWithAuth("GET", TRANSFORM_ENDPOINT + "_all/_stats", authHeader); stats = entityAsMap(client().performRequest(getRequest)); assertEquals(3, XContentMapValues.extractValue("count", stats)); - getRequest = createRequestWithAuth("GET", DATAFRAME_ENDPOINT + "*/_stats", authHeader); + getRequest = createRequestWithAuth("GET", TRANSFORM_ENDPOINT + "*/_stats", authHeader); stats = entityAsMap(client().performRequest(getRequest)); assertEquals(3, XContentMapValues.extractValue("count", stats)); - getRequest = createRequestWithAuth("GET", DATAFRAME_ENDPOINT + "pivot_1,pivot_2/_stats", authHeader); + getRequest = createRequestWithAuth("GET", TRANSFORM_ENDPOINT + 
"pivot_1,pivot_2/_stats", authHeader); stats = entityAsMap(client().performRequest(getRequest)); assertEquals(2, XContentMapValues.extractValue("count", stats)); - getRequest = createRequestWithAuth("GET", DATAFRAME_ENDPOINT + "pivot_*/_stats", authHeader); + getRequest = createRequestWithAuth("GET", TRANSFORM_ENDPOINT + "pivot_*/_stats", authHeader); stats = entityAsMap(client().performRequest(getRequest)); assertEquals(3, XContentMapValues.extractValue("count", stats)); @@ -111,7 +111,7 @@ public class DataFrameGetAndGetStatsIT extends DataFrameRestTestCase { } // only pivot_1 - getRequest = createRequestWithAuth("GET", DATAFRAME_ENDPOINT + "pivot_1/_stats", authHeader); + getRequest = createRequestWithAuth("GET", TRANSFORM_ENDPOINT + "pivot_1/_stats", authHeader); stats = entityAsMap(client().performRequest(getRequest)); assertEquals(1, XContentMapValues.extractValue("count", stats)); @@ -122,7 +122,7 @@ public class DataFrameGetAndGetStatsIT extends DataFrameRestTestCase { assertEquals(1, XContentMapValues.extractValue("checkpointing.last.checkpoint", transformsStats.get(0))); // only continuous - getRequest = createRequestWithAuth("GET", DATAFRAME_ENDPOINT + "pivot_continuous/_stats", authHeader); + getRequest = createRequestWithAuth("GET", TRANSFORM_ENDPOINT + "pivot_continuous/_stats", authHeader); stats = entityAsMap(client().performRequest(getRequest)); assertEquals(1, XContentMapValues.extractValue("count", stats)); @@ -133,42 +133,42 @@ public class DataFrameGetAndGetStatsIT extends DataFrameRestTestCase { // check all the different ways to retrieve all transforms - getRequest = createRequestWithAuth("GET", DATAFRAME_ENDPOINT, authHeader); + getRequest = createRequestWithAuth("GET", TRANSFORM_ENDPOINT, authHeader); Map transforms = entityAsMap(client().performRequest(getRequest)); assertEquals(3, XContentMapValues.extractValue("count", transforms)); - getRequest = createRequestWithAuth("GET", DATAFRAME_ENDPOINT + "_all", authHeader); + getRequest = createRequestWithAuth("GET", TRANSFORM_ENDPOINT + "_all", authHeader); transforms = entityAsMap(client().performRequest(getRequest)); assertEquals(3, XContentMapValues.extractValue("count", transforms)); - getRequest = createRequestWithAuth("GET", DATAFRAME_ENDPOINT + "*", authHeader); + getRequest = createRequestWithAuth("GET", TRANSFORM_ENDPOINT + "*", authHeader); transforms = entityAsMap(client().performRequest(getRequest)); assertEquals(3, XContentMapValues.extractValue("count", transforms)); // only pivot_1 - getRequest = createRequestWithAuth("GET", DATAFRAME_ENDPOINT + "pivot_1", authHeader); + getRequest = createRequestWithAuth("GET", TRANSFORM_ENDPOINT + "pivot_1", authHeader); transforms = entityAsMap(client().performRequest(getRequest)); assertEquals(1, XContentMapValues.extractValue("count", transforms)); - stopDataFrameTransform("pivot_continuous", false); + stopTransform("pivot_continuous", false); } @SuppressWarnings("unchecked") public void testGetPersistedStatsWithoutTask() throws Exception { createPivotReviewsTransform("pivot_stats_1", "pivot_reviews_stats_1", null); startAndWaitForTransform("pivot_stats_1", "pivot_reviews_stats_1"); - stopDataFrameTransform("pivot_stats_1", false); + stopTransform("pivot_stats_1", false); // Get rid of the first transform task, but keep the configuration - client().performRequest(new Request("POST", "_tasks/_cancel?actions="+DataFrameField.TASK_NAME+"*")); + client().performRequest(new Request("POST", "_tasks/_cancel?actions="+TransformField.TASK_NAME+"*")); // Verify that the task 
is gone Map tasks = - entityAsMap(client().performRequest(new Request("GET", "_tasks?actions="+DataFrameField.TASK_NAME+"*"))); + entityAsMap(client().performRequest(new Request("GET", "_tasks?actions="+TransformField.TASK_NAME+"*"))); assertTrue(((Map)XContentMapValues.extractValue("nodes", tasks)).isEmpty()); createPivotReviewsTransform("pivot_stats_2", "pivot_reviews_stats_2", null); startAndWaitForTransform("pivot_stats_2", "pivot_reviews_stats_2"); - Request getRequest = createRequestWithAuth("GET", DATAFRAME_ENDPOINT + "_stats", BASIC_AUTH_VALUE_DATA_FRAME_ADMIN); + Request getRequest = createRequestWithAuth("GET", TRANSFORM_ENDPOINT + "_stats", BASIC_AUTH_VALUE_TRANSFORM_ADMIN); Map stats = entityAsMap(client().performRequest(getRequest)); assertEquals(2, XContentMapValues.extractValue("count", stats)); List> transformsStats = (List>)XContentMapValues.extractValue("transforms", stats); @@ -184,15 +184,15 @@ public class DataFrameGetAndGetStatsIT extends DataFrameRestTestCase { @SuppressWarnings("unchecked") public void testGetProgressStatsWithPivotQuery() throws Exception { String transformId = "simple_stats_pivot_with_query"; - String dataFrameIndex = "pivot_stats_reviews_user_id_above_20"; + String transformIndex = "pivot_stats_reviews_user_id_above_20"; String query = "\"match\": {\"user_id\": \"user_26\"}"; - createPivotReviewsTransform(transformId, dataFrameIndex, query); - startAndWaitForTransform(transformId, dataFrameIndex); + createPivotReviewsTransform(transformId, transformIndex, query); + startAndWaitForTransform(transformId, transformIndex); // Alternate testing between admin and lowly user, as both should be able to get the configs and stats - String authHeader = randomFrom(BASIC_AUTH_VALUE_DATA_FRAME_USER, BASIC_AUTH_VALUE_DATA_FRAME_ADMIN); + String authHeader = randomFrom(BASIC_AUTH_VALUE_TRANSFORM_USER, BASIC_AUTH_VALUE_TRANSFORM_ADMIN); - Request getRequest = createRequestWithAuth("GET", DATAFRAME_ENDPOINT + transformId + "/_stats", authHeader); + Request getRequest = createRequestWithAuth("GET", TRANSFORM_ENDPOINT + transformId + "/_stats", authHeader); Map stats = entityAsMap(client().performRequest(getRequest)); assertEquals(1, XContentMapValues.extractValue("count", stats)); List> transformsStats = (List>)XContentMapValues.extractValue("transforms", stats); @@ -218,7 +218,7 @@ public class DataFrameGetAndGetStatsIT extends DataFrameRestTestCase { String transformDest = transformId + "_idx"; String transformSrc = "reviews_cont_pivot_test"; createReviewsIndex(transformSrc); - final Request createDataframeTransformRequest = createRequestWithAuth("PUT", DATAFRAME_ENDPOINT + transformId, null); + final Request createTransformRequest = createRequestWithAuth("PUT", TRANSFORM_ENDPOINT + transformId, null); String config = "{ \"dest\": {\"index\":\"" + transformDest + "\"}," + " \"source\": {\"index\":\"" + transformSrc + "\"}," + " \"frequency\": \"1s\"," @@ -236,13 +236,13 @@ public class DataFrameGetAndGetStatsIT extends DataFrameRestTestCase { + " } } } }" + "}"; - createDataframeTransformRequest.setJsonEntity(config); + createTransformRequest.setJsonEntity(config); - Map createDataframeTransformResponse = entityAsMap(client().performRequest(createDataframeTransformRequest)); - assertThat(createDataframeTransformResponse.get("acknowledged"), equalTo(Boolean.TRUE)); + Map createTransformResponse = entityAsMap(client().performRequest(createTransformRequest)); + assertThat(createTransformResponse.get("acknowledged"), equalTo(Boolean.TRUE)); 
startAndWaitForContinuousTransform(transformId, transformDest, null); - Request getRequest = createRequestWithAuth("GET", DATAFRAME_ENDPOINT + transformId + "/_stats", null); + Request getRequest = createRequestWithAuth("GET", TRANSFORM_ENDPOINT + transformId + "/_stats", null); Map stats = entityAsMap(client().performRequest(getRequest)); List> transformsStats = (List>)XContentMapValues.extractValue("transforms", stats); assertEquals(1, transformsStats.size()); @@ -284,7 +284,7 @@ public class DataFrameGetAndGetStatsIT extends DataFrameRestTestCase { bulkRequest.setJsonEntity(bulk.toString()); client().performRequest(bulkRequest); - waitForDataFrameCheckpoint(transformId, 2L); + waitForTransformCheckpoint(transformId, 2L); // We should now have exp avgs since we have processed a continuous checkpoint assertBusy(() -> { diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformInternalIndexIT.java b/x-pack/plugin/transform/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/transform/integration/TransformInternalIndexIT.java similarity index 78% rename from x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformInternalIndexIT.java rename to x-pack/plugin/transform/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/transform/integration/TransformInternalIndexIT.java index a604062ce4a..af35bb022de 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformInternalIndexIT.java +++ b/x-pack/plugin/transform/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/transform/integration/TransformInternalIndexIT.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.dataframe.integration; +package org.elasticsearch.xpack.transform.integration; import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.get.GetResponse; @@ -12,38 +12,38 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.RestHighLevelClient; -import org.elasticsearch.client.dataframe.GetDataFrameTransformRequest; -import org.elasticsearch.client.dataframe.GetDataFrameTransformResponse; -import org.elasticsearch.client.dataframe.UpdateDataFrameTransformRequest; -import org.elasticsearch.client.dataframe.UpdateDataFrameTransformResponse; -import org.elasticsearch.client.dataframe.transforms.DataFrameTransformConfigUpdate; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.search.SearchModule; -import org.elasticsearch.xpack.core.dataframe.DataFrameField; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; +import org.elasticsearch.xpack.core.transform.TransformField; +import org.elasticsearch.xpack.core.transform.transforms.TransformConfig; +import org.elasticsearch.xpack.transform.persistence.TransformInternalIndex; import org.elasticsearch.client.indices.CreateIndexRequest; +import org.elasticsearch.client.transform.GetDataFrameTransformRequest; +import org.elasticsearch.client.transform.GetDataFrameTransformResponse; +import org.elasticsearch.client.transform.UpdateDataFrameTransformRequest; +import org.elasticsearch.client.transform.UpdateDataFrameTransformResponse; +import org.elasticsearch.client.transform.transforms.DataFrameTransformConfigUpdate; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.test.rest.ESRestTestCase; -import org.elasticsearch.xpack.dataframe.persistence.DataFrameInternalIndex; import java.io.IOException; import java.nio.charset.StandardCharsets; import java.util.Base64; import java.util.Collections; -import static org.elasticsearch.xpack.dataframe.persistence.DataFrameInternalIndex.addDataFrameTransformsConfigMappings; import static org.hamcrest.Matchers.is; +import static org.elasticsearch.xpack.transform.persistence.TransformInternalIndex.addTransformsConfigMappings; import static org.hamcrest.Matchers.equalTo; -public class DataFrameTransformInternalIndexIT extends ESRestTestCase { +public class TransformInternalIndexIT extends ESRestTestCase { - private static final String CURRENT_INDEX = DataFrameInternalIndex.LATEST_INDEX_NAME; - private static final String OLD_INDEX = DataFrameInternalIndex.INDEX_PATTERN + "1"; + private static final String CURRENT_INDEX = TransformInternalIndex.LATEST_INDEX_NAME; + private static final String OLD_INDEX = TransformInternalIndex.INDEX_PATTERN + "1"; public void testUpdateDeletesOldTransformConfig() throws Exception { @@ -53,8 +53,8 @@ public class DataFrameTransformInternalIndexIT extends ESRestTestCase { try (XContentBuilder builder = XContentFactory.jsonBuilder()) { builder.startObject(); builder.startObject("properties"); - builder.startObject(DataFrameField.INDEX_DOC_TYPE.getPreferredName()).field("type", "keyword").endObject(); - addDataFrameTransformsConfigMappings(builder); + builder.startObject(TransformField.INDEX_DOC_TYPE.getPreferredName()).field("type", 
"keyword").endObject(); + addTransformsConfigMappings(builder); builder.endObject(); builder.endObject(); client.indices().create(new CreateIndexRequest(OLD_INDEX).mapping(builder), RequestOptions.DEFAULT); @@ -80,11 +80,11 @@ public class DataFrameTransformInternalIndexIT extends ESRestTestCase { + "\"frequency\":\"1s\"" + "}"; client.index(new IndexRequest(OLD_INDEX) - .id(DataFrameTransformConfig.documentId(transformId)) + .id(TransformConfig.documentId(transformId)) .source(config, XContentType.JSON) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE), RequestOptions.DEFAULT); - GetResponse getResponse = client.get(new GetRequest(OLD_INDEX, DataFrameTransformConfig.documentId(transformId)), + GetResponse getResponse = client.get(new GetRequest(OLD_INDEX, TransformConfig.documentId(transformId)), RequestOptions.DEFAULT); assertThat(getResponse.isExists(), is(true)); @@ -100,11 +100,11 @@ public class DataFrameTransformInternalIndexIT extends ESRestTestCase { assertThat(updated.getTransformConfiguration().getDescription(), equalTo("updated")); // Old should now be gone - getResponse = client.get(new GetRequest(OLD_INDEX, DataFrameTransformConfig.documentId(transformId)), RequestOptions.DEFAULT); + getResponse = client.get(new GetRequest(OLD_INDEX, TransformConfig.documentId(transformId)), RequestOptions.DEFAULT); assertThat(getResponse.isExists(), is(false)); // New should be here - getResponse = client.get(new GetRequest(CURRENT_INDEX, DataFrameTransformConfig.documentId(transformId)), + getResponse = client.get(new GetRequest(CURRENT_INDEX, TransformConfig.documentId(transformId)), RequestOptions.DEFAULT); assertThat(getResponse.isExists(), is(true)); } diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameMetaDataIT.java b/x-pack/plugin/transform/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/transform/integration/TransformMetaDataIT.java similarity index 83% rename from x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameMetaDataIT.java rename to x-pack/plugin/transform/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/transform/integration/TransformMetaDataIT.java index 26a957ea055..c3a38da474c 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameMetaDataIT.java +++ b/x-pack/plugin/transform/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/transform/integration/TransformMetaDataIT.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.dataframe.integration; +package org.elasticsearch.xpack.transform.integration; import org.elasticsearch.Version; import org.elasticsearch.client.Request; @@ -15,7 +15,7 @@ import org.junit.Before; import java.io.IOException; import java.util.Map; -public class DataFrameMetaDataIT extends DataFrameRestTestCase { +public class TransformMetaDataIT extends TransformRestTestCase { private boolean indicesCreated = false; @@ -46,14 +46,14 @@ public class DataFrameMetaDataIT extends DataFrameRestTestCase { Map mappingAsMap = entityAsMap(mappingResponse); assertEquals(Version.CURRENT.toString(), - XContentMapValues.extractValue("pivot_reviews.mappings._meta._data_frame.version.created", mappingAsMap)); - assertTrue((Long) XContentMapValues.extractValue("pivot_reviews.mappings._meta._data_frame.creation_date_in_millis", + XContentMapValues.extractValue("pivot_reviews.mappings._meta._transform.version.created", mappingAsMap)); + assertTrue((Long) XContentMapValues.extractValue("pivot_reviews.mappings._meta._transform.creation_date_in_millis", mappingAsMap) < System.currentTimeMillis()); - assertTrue((Long) XContentMapValues.extractValue("pivot_reviews.mappings._meta._data_frame.creation_date_in_millis", + assertTrue((Long) XContentMapValues.extractValue("pivot_reviews.mappings._meta._transform.creation_date_in_millis", mappingAsMap) > testStarted); assertEquals("test_meta", - XContentMapValues.extractValue("pivot_reviews.mappings._meta._data_frame.transform", mappingAsMap)); - assertEquals("data-frame-transform", + XContentMapValues.extractValue("pivot_reviews.mappings._meta._transform.transform", mappingAsMap)); + assertEquals("transform", XContentMapValues.extractValue("pivot_reviews.mappings._meta.created_by", mappingAsMap)); } diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java b/x-pack/plugin/transform/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/transform/integration/TransformPivotRestIT.java similarity index 67% rename from x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java rename to x-pack/plugin/transform/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/transform/integration/TransformPivotRestIT.java index aca70d146fa..f590aba2c50 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java +++ b/x-pack/plugin/transform/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/transform/integration/TransformPivotRestIT.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.dataframe.integration; +package org.elasticsearch.xpack.transform.integration; import org.elasticsearch.client.Request; import org.elasticsearch.common.xcontent.support.XContentMapValues; @@ -24,11 +24,11 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.lessThan; -public class DataFramePivotRestIT extends DataFrameRestTestCase { +public class TransformPivotRestIT extends TransformRestTestCase { - private static final String TEST_USER_NAME = "df_admin_plus_data"; + private static final String TEST_USER_NAME = "transform_admin_plus_data"; private static final String DATA_ACCESS_ROLE = "test_data_access"; - private static final String BASIC_AUTH_VALUE_DATA_FRAME_ADMIN_WITH_SOME_DATA_ACCESS = + private static final String BASIC_AUTH_VALUE_TRANSFORM_ADMIN_WITH_SOME_DATA_ACCESS = basicAuthHeaderValue(TEST_USER_NAME, TEST_PASSWORD_SECURE_STRING); private static boolean indicesCreated = false; @@ -55,44 +55,44 @@ public class DataFramePivotRestIT extends DataFrameRestTestCase { public void testSimplePivot() throws Exception { String transformId = "simple-pivot"; - String dataFrameIndex = "pivot_reviews"; - setupDataAccessRole(DATA_ACCESS_ROLE, REVIEWS_INDEX_NAME, dataFrameIndex); + String transformIndex = "pivot_reviews"; + setupDataAccessRole(DATA_ACCESS_ROLE, REVIEWS_INDEX_NAME, transformIndex); - createPivotReviewsTransform(transformId, dataFrameIndex, null, null, BASIC_AUTH_VALUE_DATA_FRAME_ADMIN_WITH_SOME_DATA_ACCESS); + createPivotReviewsTransform(transformId, transformIndex, null, null, BASIC_AUTH_VALUE_TRANSFORM_ADMIN_WITH_SOME_DATA_ACCESS); - startAndWaitForTransform(transformId, dataFrameIndex, BASIC_AUTH_VALUE_DATA_FRAME_ADMIN_WITH_SOME_DATA_ACCESS); + startAndWaitForTransform(transformId, transformIndex, BASIC_AUTH_VALUE_TRANSFORM_ADMIN_WITH_SOME_DATA_ACCESS); // we expect 27 documents as there shall be 27 user_id's - Map indexStats = getAsMap(dataFrameIndex + "/_stats"); + Map indexStats = getAsMap(transformIndex + "/_stats"); assertEquals(27, XContentMapValues.extractValue("_all.total.docs.count", indexStats)); // get and check some users - assertOnePivotValue(dataFrameIndex + "/_search?q=reviewer:user_0", 3.776978417); - assertOnePivotValue(dataFrameIndex + "/_search?q=reviewer:user_5", 3.72); - assertOnePivotValue(dataFrameIndex + "/_search?q=reviewer:user_11", 3.846153846); - assertOnePivotValue(dataFrameIndex + "/_search?q=reviewer:user_20", 3.769230769); - assertOnePivotValue(dataFrameIndex + "/_search?q=reviewer:user_26", 3.918918918); + assertOnePivotValue(transformIndex + "/_search?q=reviewer:user_0", 3.776978417); + assertOnePivotValue(transformIndex + "/_search?q=reviewer:user_5", 3.72); + assertOnePivotValue(transformIndex + "/_search?q=reviewer:user_11", 3.846153846); + assertOnePivotValue(transformIndex + "/_search?q=reviewer:user_20", 3.769230769); + assertOnePivotValue(transformIndex + "/_search?q=reviewer:user_26", 3.918918918); } public void testSimplePivotWithQuery() throws Exception { String transformId = "simple_pivot_with_query"; - String dataFrameIndex = "pivot_reviews_user_id_above_20"; - setupDataAccessRole(DATA_ACCESS_ROLE, REVIEWS_INDEX_NAME, dataFrameIndex); + String transformIndex = "pivot_reviews_user_id_above_20"; + setupDataAccessRole(DATA_ACCESS_ROLE, REVIEWS_INDEX_NAME, transformIndex); String query = "\"match\": {\"user_id\": \"user_26\"}"; - createPivotReviewsTransform(transformId, dataFrameIndex, query, null, 
BASIC_AUTH_VALUE_DATA_FRAME_ADMIN_WITH_SOME_DATA_ACCESS); + createPivotReviewsTransform(transformId, transformIndex, query, null, BASIC_AUTH_VALUE_TRANSFORM_ADMIN_WITH_SOME_DATA_ACCESS); - startAndWaitForTransform(transformId, dataFrameIndex, BASIC_AUTH_VALUE_DATA_FRAME_ADMIN_WITH_SOME_DATA_ACCESS); + startAndWaitForTransform(transformId, transformIndex, BASIC_AUTH_VALUE_TRANSFORM_ADMIN_WITH_SOME_DATA_ACCESS); // we expect only 1 document due to the query - Map indexStats = getAsMap(dataFrameIndex + "/_stats"); + Map indexStats = getAsMap(transformIndex + "/_stats"); assertEquals(1, XContentMapValues.extractValue("_all.total.docs.count", indexStats)); - assertOnePivotValue(dataFrameIndex + "/_search?q=reviewer:user_26", 3.918918918); + assertOnePivotValue(transformIndex + "/_search?q=reviewer:user_26", 3.918918918); } public void testPivotWithPipeline() throws Exception { String transformId = "simple_pivot_with_pipeline"; - String dataFrameIndex = "pivot_with_pipeline"; + String transformIndex = "pivot_with_pipeline"; String pipelineId = "my-pivot-pipeline"; int pipelineValue = 42; Request pipelineRequest = new Request("PUT", "/_ingest/pipeline/" + pipelineId); @@ -109,36 +109,36 @@ public class DataFramePivotRestIT extends DataFrameRestTestCase { "}"); client().performRequest(pipelineRequest); - setupDataAccessRole(DATA_ACCESS_ROLE, REVIEWS_INDEX_NAME, dataFrameIndex); - createPivotReviewsTransform(transformId, dataFrameIndex, null, pipelineId, BASIC_AUTH_VALUE_DATA_FRAME_ADMIN_WITH_SOME_DATA_ACCESS); + setupDataAccessRole(DATA_ACCESS_ROLE, REVIEWS_INDEX_NAME, transformIndex); + createPivotReviewsTransform(transformId, transformIndex, null, pipelineId, BASIC_AUTH_VALUE_TRANSFORM_ADMIN_WITH_SOME_DATA_ACCESS); - startAndWaitForTransform(transformId, dataFrameIndex, BASIC_AUTH_VALUE_DATA_FRAME_ADMIN_WITH_SOME_DATA_ACCESS); + startAndWaitForTransform(transformId, transformIndex, BASIC_AUTH_VALUE_TRANSFORM_ADMIN_WITH_SOME_DATA_ACCESS); // we expect 27 documents as there shall be 27 user_id's - Map indexStats = getAsMap(dataFrameIndex + "/_stats"); + Map indexStats = getAsMap(transformIndex + "/_stats"); assertEquals(27, XContentMapValues.extractValue("_all.total.docs.count", indexStats)); // get and check some users - assertOnePivotValue(dataFrameIndex + "/_search?q=reviewer:user_0", 3.776978417); - assertOnePivotValue(dataFrameIndex + "/_search?q=reviewer:user_5", 3.72); - assertOnePivotValue(dataFrameIndex + "/_search?q=reviewer:user_11", 3.846153846); - assertOnePivotValue(dataFrameIndex + "/_search?q=reviewer:user_20", 3.769230769); - assertOnePivotValue(dataFrameIndex + "/_search?q=reviewer:user_26", 3.918918918); + assertOnePivotValue(transformIndex + "/_search?q=reviewer:user_0", 3.776978417); + assertOnePivotValue(transformIndex + "/_search?q=reviewer:user_5", 3.72); + assertOnePivotValue(transformIndex + "/_search?q=reviewer:user_11", 3.846153846); + assertOnePivotValue(transformIndex + "/_search?q=reviewer:user_20", 3.769230769); + assertOnePivotValue(transformIndex + "/_search?q=reviewer:user_26", 3.918918918); - Map searchResult = getAsMap(dataFrameIndex + "/_search?q=reviewer:user_0"); + Map searchResult = getAsMap(transformIndex + "/_search?q=reviewer:user_0"); Integer actual = (Integer) ((List) XContentMapValues.extractValue("hits.hits._source.pipeline_field", searchResult)).get(0); assertThat(actual, equalTo(pipelineValue)); } public void testBucketSelectorPivot() throws Exception { String transformId = "simple_bucket_selector_pivot"; - String dataFrameIndex = 
"bucket_selector_idx"; - setupDataAccessRole(DATA_ACCESS_ROLE, REVIEWS_INDEX_NAME, dataFrameIndex); - final Request createDataframeTransformRequest = createRequestWithAuth("PUT", DATAFRAME_ENDPOINT + transformId, - BASIC_AUTH_VALUE_DATA_FRAME_ADMIN_WITH_SOME_DATA_ACCESS); + String transformIndex = "bucket_selector_idx"; + setupDataAccessRole(DATA_ACCESS_ROLE, REVIEWS_INDEX_NAME, transformIndex); + final Request createTransformRequest = createRequestWithAuth("PUT", TRANSFORM_ENDPOINT + transformId, + BASIC_AUTH_VALUE_TRANSFORM_ADMIN_WITH_SOME_DATA_ACCESS); String config = "{" + " \"source\": {\"index\":\"" + REVIEWS_INDEX_NAME + "\"}," - + " \"dest\": {\"index\":\"" + dataFrameIndex + "\"}," + + " \"dest\": {\"index\":\"" + transformIndex + "\"}," + " \"frequency\": \"1s\"," + " \"pivot\": {" + " \"group_by\": {" @@ -158,17 +158,17 @@ public class DataFramePivotRestIT extends DataFrameRestTestCase { + " }" + " } } }" + "}"; - createDataframeTransformRequest.setJsonEntity(config); - Map createDataframeTransformResponse = entityAsMap(client().performRequest(createDataframeTransformRequest)); - assertThat(createDataframeTransformResponse.get("acknowledged"), equalTo(Boolean.TRUE)); + createTransformRequest.setJsonEntity(config); + Map createTransformResponse = entityAsMap(client().performRequest(createTransformRequest)); + assertThat(createTransformResponse.get("acknowledged"), equalTo(Boolean.TRUE)); - startAndWaitForTransform(transformId, dataFrameIndex); - assertTrue(indexExists(dataFrameIndex)); + startAndWaitForTransform(transformId, transformIndex); + assertTrue(indexExists(transformIndex)); // get and check some users - assertOnePivotValue(dataFrameIndex + "/_search?q=reviewer:user_11", 3.846153846); - assertOnePivotValue(dataFrameIndex + "/_search?q=reviewer:user_26", 3.918918918); + assertOnePivotValue(transformIndex + "/_search?q=reviewer:user_11", 3.846153846); + assertOnePivotValue(transformIndex + "/_search?q=reviewer:user_26", 3.918918918); - Map indexStats = getAsMap(dataFrameIndex + "/_stats"); + Map indexStats = getAsMap(transformIndex + "/_stats"); // Should be less than the total number of users since we filtered every user who had an average review less than or equal to 3.8 assertEquals(21, XContentMapValues.extractValue("_all.total.docs.count", indexStats)); } @@ -177,13 +177,13 @@ public class DataFramePivotRestIT extends DataFrameRestTestCase { String indexName = "continuous_reviews"; createReviewsIndex(indexName); String transformId = "simple_continuous_pivot"; - String dataFrameIndex = "pivot_reviews_continuous"; - setupDataAccessRole(DATA_ACCESS_ROLE, indexName, dataFrameIndex); - final Request createDataframeTransformRequest = createRequestWithAuth("PUT", DATAFRAME_ENDPOINT + transformId, - BASIC_AUTH_VALUE_DATA_FRAME_ADMIN_WITH_SOME_DATA_ACCESS); + String transformIndex = "pivot_reviews_continuous"; + setupDataAccessRole(DATA_ACCESS_ROLE, indexName, transformIndex); + final Request createTransformRequest = createRequestWithAuth("PUT", TRANSFORM_ENDPOINT + transformId, + BASIC_AUTH_VALUE_TRANSFORM_ADMIN_WITH_SOME_DATA_ACCESS); String config = "{" + " \"source\": {\"index\":\"" + indexName + "\"}," - + " \"dest\": {\"index\":\"" + dataFrameIndex + "\"}," + + " \"dest\": {\"index\":\"" + transformIndex + "\"}," + " \"frequency\": \"1s\"," + " \"sync\": {\"time\": {\"field\": \"timestamp\", \"delay\": \"1s\"}}," + " \"pivot\": {" @@ -198,20 +198,20 @@ public class DataFramePivotRestIT extends DataFrameRestTestCase { + " \"field\": \"stars\"" + " } } } }" + "}"; - 
createDataframeTransformRequest.setJsonEntity(config); - Map createDataframeTransformResponse = entityAsMap(client().performRequest(createDataframeTransformRequest)); - assertThat(createDataframeTransformResponse.get("acknowledged"), equalTo(Boolean.TRUE)); + createTransformRequest.setJsonEntity(config); + Map createTransformResponse = entityAsMap(client().performRequest(createTransformRequest)); + assertThat(createTransformResponse.get("acknowledged"), equalTo(Boolean.TRUE)); - startAndWaitForContinuousTransform(transformId, dataFrameIndex, null); - assertTrue(indexExists(dataFrameIndex)); + startAndWaitForContinuousTransform(transformId, transformIndex, null); + assertTrue(indexExists(transformIndex)); // get and check some users - assertOnePivotValue(dataFrameIndex + "/_search?q=reviewer:user_0", 3.776978417); - assertOnePivotValue(dataFrameIndex + "/_search?q=reviewer:user_5", 3.72); - assertOnePivotValue(dataFrameIndex + "/_search?q=reviewer:user_11", 3.846153846); - assertOnePivotValue(dataFrameIndex + "/_search?q=reviewer:user_20", 3.769230769); - assertOnePivotValue(dataFrameIndex + "/_search?q=reviewer:user_26", 3.918918918); + assertOnePivotValue(transformIndex + "/_search?q=reviewer:user_0", 3.776978417); + assertOnePivotValue(transformIndex + "/_search?q=reviewer:user_5", 3.72); + assertOnePivotValue(transformIndex + "/_search?q=reviewer:user_11", 3.846153846); + assertOnePivotValue(transformIndex + "/_search?q=reviewer:user_20", 3.769230769); + assertOnePivotValue(transformIndex + "/_search?q=reviewer:user_26", 3.918918918); - Map indexStats = getAsMap(dataFrameIndex + "/_stats"); + Map indexStats = getAsMap(transformIndex + "/_stats"); assertEquals(27, XContentMapValues.extractValue("_all.total.docs.count", indexStats)); final StringBuilder bulk = new StringBuilder(); long user = 42; @@ -262,25 +262,25 @@ public class DataFramePivotRestIT extends DataFrameRestTestCase { bulkRequest.setJsonEntity(bulk.toString()); client().performRequest(bulkRequest); - waitForDataFrameCheckpoint(transformId, 2); + waitForTransformCheckpoint(transformId, 2); - stopDataFrameTransform(transformId, false); - refreshIndex(dataFrameIndex); + stopTransform(transformId, false); + refreshIndex(transformIndex); // assert that other users are unchanged - assertOnePivotValue(dataFrameIndex + "/_search?q=reviewer:user_0", 3.776978417); - assertOnePivotValue(dataFrameIndex + "/_search?q=reviewer:user_5", 3.72); - assertOnePivotValue(dataFrameIndex + "/_search?q=reviewer:user_11", 3.846153846); - assertOnePivotValue(dataFrameIndex + "/_search?q=reviewer:user_20", 3.769230769); + assertOnePivotValue(transformIndex + "/_search?q=reviewer:user_0", 3.776978417); + assertOnePivotValue(transformIndex + "/_search?q=reviewer:user_5", 3.72); + assertOnePivotValue(transformIndex + "/_search?q=reviewer:user_11", 3.846153846); + assertOnePivotValue(transformIndex + "/_search?q=reviewer:user_20", 3.769230769); - Map user26searchResult = getAsMap(dataFrameIndex + "/_search?q=reviewer:user_26"); + Map user26searchResult = getAsMap(transformIndex + "/_search?q=reviewer:user_26"); assertEquals(1, XContentMapValues.extractValue("hits.total.value", user26searchResult)); double actual = (Double) ((List) XContentMapValues.extractValue("hits.hits._source.avg_rating", user26searchResult)) .get(0); assertThat(actual, greaterThan(3.92)); - Map user42searchResult = getAsMap(dataFrameIndex + "/_search?q=reviewer:user_42"); + Map user42searchResult = getAsMap(transformIndex + "/_search?q=reviewer:user_42"); assertEquals(1, 
XContentMapValues.extractValue("hits.total.value", user42searchResult)); actual = (Double) ((List) XContentMapValues.extractValue("hits.hits._source.avg_rating", user42searchResult)) .get(0); @@ -290,15 +290,15 @@ public class DataFramePivotRestIT extends DataFrameRestTestCase { public void testHistogramPivot() throws Exception { String transformId = "simple_histogram_pivot"; - String dataFrameIndex = "pivot_reviews_via_histogram"; - setupDataAccessRole(DATA_ACCESS_ROLE, REVIEWS_INDEX_NAME, dataFrameIndex); + String transformIndex = "pivot_reviews_via_histogram"; + setupDataAccessRole(DATA_ACCESS_ROLE, REVIEWS_INDEX_NAME, transformIndex); - final Request createDataframeTransformRequest = createRequestWithAuth("PUT", DATAFRAME_ENDPOINT + transformId, - BASIC_AUTH_VALUE_DATA_FRAME_ADMIN_WITH_SOME_DATA_ACCESS); + final Request createTransformRequest = createRequestWithAuth("PUT", TRANSFORM_ENDPOINT + transformId, + BASIC_AUTH_VALUE_TRANSFORM_ADMIN_WITH_SOME_DATA_ACCESS); String config = "{" + " \"source\": {\"index\":\"" + REVIEWS_INDEX_NAME + "\"}," - + " \"dest\": {\"index\":\"" + dataFrameIndex + "\"},"; + + " \"dest\": {\"index\":\"" + transformIndex + "\"},"; config += " \"pivot\": {" + " \"group_by\": {" @@ -313,30 +313,30 @@ public class DataFramePivotRestIT extends DataFrameRestTestCase { + " } } } }" + "}"; - createDataframeTransformRequest.setJsonEntity(config); - Map createDataframeTransformResponse = entityAsMap(client().performRequest(createDataframeTransformRequest)); - assertThat(createDataframeTransformResponse.get("acknowledged"), equalTo(Boolean.TRUE)); + createTransformRequest.setJsonEntity(config); + Map createTransformResponse = entityAsMap(client().performRequest(createTransformRequest)); + assertThat(createTransformResponse.get("acknowledged"), equalTo(Boolean.TRUE)); - startAndWaitForTransform(transformId, dataFrameIndex); - assertTrue(indexExists(dataFrameIndex)); + startAndWaitForTransform(transformId, transformIndex); + assertTrue(indexExists(transformIndex)); // we expect 3 documents as there shall be 5 unique star values and we are bucketing every 2 starting at 0 - Map indexStats = getAsMap(dataFrameIndex + "/_stats"); + Map indexStats = getAsMap(transformIndex + "/_stats"); assertEquals(3, XContentMapValues.extractValue("_all.total.docs.count", indexStats)); - assertOnePivotValue(dataFrameIndex + "/_search?q=every_2:0.0", 1.0); + assertOnePivotValue(transformIndex + "/_search?q=every_2:0.0", 1.0); } public void testBiggerPivot() throws Exception { String transformId = "bigger_pivot"; - String dataFrameIndex = "bigger_pivot_reviews"; - setupDataAccessRole(DATA_ACCESS_ROLE, REVIEWS_INDEX_NAME, dataFrameIndex); + String transformIndex = "bigger_pivot_reviews"; + setupDataAccessRole(DATA_ACCESS_ROLE, REVIEWS_INDEX_NAME, transformIndex); - final Request createDataframeTransformRequest = createRequestWithAuth("PUT", DATAFRAME_ENDPOINT + transformId, - BASIC_AUTH_VALUE_DATA_FRAME_ADMIN_WITH_SOME_DATA_ACCESS); + final Request createTransformRequest = createRequestWithAuth("PUT", TRANSFORM_ENDPOINT + transformId, + BASIC_AUTH_VALUE_TRANSFORM_ADMIN_WITH_SOME_DATA_ACCESS); String config = "{" + " \"source\": {\"index\":\"" + REVIEWS_INDEX_NAME + "\"}," - + " \"dest\": {\"index\":\"" + dataFrameIndex + "\"},"; + + " \"dest\": {\"index\":\"" + transformIndex + "\"},"; config += " \"pivot\": {" + " \"group_by\": {" @@ -372,19 +372,19 @@ public class DataFramePivotRestIT extends DataFrameRestTestCase { + " } }" + "}"; - createDataframeTransformRequest.setJsonEntity(config); - 
Map createDataframeTransformResponse = entityAsMap(client().performRequest(createDataframeTransformRequest)); - assertThat(createDataframeTransformResponse.get("acknowledged"), equalTo(Boolean.TRUE)); + createTransformRequest.setJsonEntity(config); + Map createTransformResponse = entityAsMap(client().performRequest(createTransformRequest)); + assertThat(createTransformResponse.get("acknowledged"), equalTo(Boolean.TRUE)); - startAndWaitForTransform(transformId, dataFrameIndex, BASIC_AUTH_VALUE_DATA_FRAME_ADMIN_WITH_SOME_DATA_ACCESS); - assertTrue(indexExists(dataFrameIndex)); + startAndWaitForTransform(transformId, transformIndex, BASIC_AUTH_VALUE_TRANSFORM_ADMIN_WITH_SOME_DATA_ACCESS); + assertTrue(indexExists(transformIndex)); // we expect 27 documents as there shall be 27 user_id's - Map indexStats = getAsMap(dataFrameIndex + "/_stats"); + Map indexStats = getAsMap(transformIndex + "/_stats"); assertEquals(27, XContentMapValues.extractValue("_all.total.docs.count", indexStats)); // get and check some users - Map searchResult = getAsMap(dataFrameIndex + "/_search?q=reviewer:user_4"); + Map searchResult = getAsMap(transformIndex + "/_search?q=reviewer:user_4"); assertEquals(1, XContentMapValues.extractValue("hits.total.value", searchResult)); Number actual = (Number) ((List) XContentMapValues.extractValue("hits.hits._source.avg_rating", searchResult)).get(0); @@ -403,15 +403,15 @@ public class DataFramePivotRestIT extends DataFrameRestTestCase { public void testDateHistogramPivot() throws Exception { String transformId = "simple_date_histogram_pivot"; - String dataFrameIndex = "pivot_reviews_via_date_histogram"; - setupDataAccessRole(DATA_ACCESS_ROLE, REVIEWS_INDEX_NAME, dataFrameIndex); + String transformIndex = "pivot_reviews_via_date_histogram"; + setupDataAccessRole(DATA_ACCESS_ROLE, REVIEWS_INDEX_NAME, transformIndex); - final Request createDataframeTransformRequest = createRequestWithAuth("PUT", DATAFRAME_ENDPOINT + transformId, - BASIC_AUTH_VALUE_DATA_FRAME_ADMIN_WITH_SOME_DATA_ACCESS); + final Request createTransformRequest = createRequestWithAuth("PUT", TRANSFORM_ENDPOINT + transformId, + BASIC_AUTH_VALUE_TRANSFORM_ADMIN_WITH_SOME_DATA_ACCESS); String config = "{" + " \"source\": {\"index\":\"" + REVIEWS_INDEX_NAME + "\"}," - + " \"dest\": {\"index\":\"" + dataFrameIndex + "\"},"; + + " \"dest\": {\"index\":\"" + transformIndex + "\"},"; config += " \"pivot\": {" + " \"group_by\": {" @@ -426,24 +426,24 @@ public class DataFramePivotRestIT extends DataFrameRestTestCase { + " } } } }" + "}"; - createDataframeTransformRequest.setJsonEntity(config); + createTransformRequest.setJsonEntity(config); - Map createDataframeTransformResponse = entityAsMap(client().performRequest(createDataframeTransformRequest)); - assertThat(createDataframeTransformResponse.get("acknowledged"), equalTo(Boolean.TRUE)); + Map createTransformResponse = entityAsMap(client().performRequest(createTransformRequest)); + assertThat(createTransformResponse.get("acknowledged"), equalTo(Boolean.TRUE)); - startAndWaitForTransform(transformId, dataFrameIndex, BASIC_AUTH_VALUE_DATA_FRAME_ADMIN_WITH_SOME_DATA_ACCESS); - assertTrue(indexExists(dataFrameIndex)); + startAndWaitForTransform(transformId, transformIndex, BASIC_AUTH_VALUE_TRANSFORM_ADMIN_WITH_SOME_DATA_ACCESS); + assertTrue(indexExists(transformIndex)); - Map indexStats = getAsMap(dataFrameIndex + "/_stats"); + Map indexStats = getAsMap(transformIndex + "/_stats"); assertEquals(104, XContentMapValues.extractValue("_all.total.docs.count", indexStats)); - 
assertOnePivotValue(dataFrameIndex + "/_search?q=by_hr:1484499600000", 4.0833333333); + assertOnePivotValue(transformIndex + "/_search?q=by_hr:1484499600000", 4.0833333333); } @SuppressWarnings("unchecked") public void testPreviewTransform() throws Exception { setupDataAccessRole(DATA_ACCESS_ROLE, REVIEWS_INDEX_NAME); - final Request createPreviewRequest = createRequestWithAuth("POST", DATAFRAME_ENDPOINT + "_preview", - BASIC_AUTH_VALUE_DATA_FRAME_ADMIN_WITH_SOME_DATA_ACCESS); + final Request createPreviewRequest = createRequestWithAuth("POST", TRANSFORM_ENDPOINT + "_preview", + BASIC_AUTH_VALUE_TRANSFORM_ADMIN_WITH_SOME_DATA_ACCESS); String config = "{" + " \"source\": {\"index\":\"" + REVIEWS_INDEX_NAME + "\"} ,"; @@ -460,8 +460,8 @@ public class DataFramePivotRestIT extends DataFrameRestTestCase { + "}"; createPreviewRequest.setJsonEntity(config); - Map previewDataframeResponse = entityAsMap(client().performRequest(createPreviewRequest)); - List> preview = (List>) previewDataframeResponse.get("preview"); + Map previewTransformResponse = entityAsMap(client().performRequest(createPreviewRequest)); + List> preview = (List>) previewTransformResponse.get("preview"); // preview is limited to 100 assertThat(preview.size(), equalTo(100)); Set expectedTopLevelFields = new HashSet<>(Arrays.asList("user", "by_day")); @@ -494,7 +494,7 @@ public class DataFramePivotRestIT extends DataFrameRestTestCase { client().performRequest(pipelineRequest); setupDataAccessRole(DATA_ACCESS_ROLE, REVIEWS_INDEX_NAME); - final Request createPreviewRequest = createRequestWithAuth("POST", DATAFRAME_ENDPOINT + "_preview", null); + final Request createPreviewRequest = createRequestWithAuth("POST", TRANSFORM_ENDPOINT + "_preview", null); String config = "{ \"source\": {\"index\":\"" + REVIEWS_INDEX_NAME + "\"} ," + "\"dest\": {\"pipeline\": \"" + pipelineId + "\"}," @@ -510,8 +510,8 @@ public class DataFramePivotRestIT extends DataFrameRestTestCase { + "}"; createPreviewRequest.setJsonEntity(config); - Map previewDataframeResponse = entityAsMap(client().performRequest(createPreviewRequest)); - List> preview = (List>)previewDataframeResponse.get("preview"); + Map previewTransformResponse = entityAsMap(client().performRequest(createPreviewRequest)); + List> preview = (List>)previewTransformResponse.get("preview"); // preview is limited to 100 assertThat(preview.size(), equalTo(100)); Set expectedTopLevelFields = new HashSet<>(Arrays.asList("user", "by_day", "pipeline_field")); @@ -528,15 +528,15 @@ public class DataFramePivotRestIT extends DataFrameRestTestCase { public void testPivotWithMaxOnDateField() throws Exception { String transformId = "simple_date_histogram_pivot_with_max_time"; - String dataFrameIndex = "pivot_reviews_via_date_histogram_with_max_time"; - setupDataAccessRole(DATA_ACCESS_ROLE, REVIEWS_INDEX_NAME, dataFrameIndex); + String transformIndex = "pivot_reviews_via_date_histogram_with_max_time"; + setupDataAccessRole(DATA_ACCESS_ROLE, REVIEWS_INDEX_NAME, transformIndex); - final Request createDataframeTransformRequest = createRequestWithAuth("PUT", DATAFRAME_ENDPOINT + transformId, - BASIC_AUTH_VALUE_DATA_FRAME_ADMIN_WITH_SOME_DATA_ACCESS); + final Request createTransformRequest = createRequestWithAuth("PUT", TRANSFORM_ENDPOINT + transformId, + BASIC_AUTH_VALUE_TRANSFORM_ADMIN_WITH_SOME_DATA_ACCESS); String config = "{" + " \"source\": {\"index\": \"" + REVIEWS_INDEX_NAME + "\"}," - + " \"dest\": {\"index\":\"" + dataFrameIndex + "\"},"; + + " \"dest\": {\"index\":\"" + transformIndex + "\"},"; config +=" 
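The preview tests above show the request body only in fragments. For orientation, a hedged sketch of a complete preview call as these tests assemble it, assuming a subclass of TransformRestTestCase so that client(), entityAsMap(...), TRANSFORM_ENDPOINT and REVIEWS_INDEX_NAME are in scope; the group_by and aggregation fields mirror the reviews index used throughout, and the method name is illustrative:

    // Minimal sketch; only source and pivot are supplied, matching the preview configs above.
    private List<Map<String, Object>> previewReviewsPivot() throws IOException {
        Request previewRequest = new Request("POST", TRANSFORM_ENDPOINT + "_preview");
        String config = "{"
            + " \"source\": {\"index\":\"" + REVIEWS_INDEX_NAME + "\"},"
            + " \"pivot\": {"
            + "   \"group_by\": { \"reviewer\": { \"terms\": { \"field\": \"user_id\" } } },"
            + "   \"aggregations\": { \"avg_rating\": { \"avg\": { \"field\": \"stars\" } } }"
            + " } }";
        previewRequest.setJsonEntity(config);
        Map<String, Object> previewResponse = entityAsMap(client().performRequest(previewRequest));
        // The endpoint caps the result at 100 generated documents, as asserted in the tests above.
        @SuppressWarnings("unchecked")
        List<Map<String, Object>> preview = (List<Map<String, Object>>) previewResponse.get("preview");
        return preview;
    }
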
\"pivot\": { \n" + " \"group_by\": {\n" + @@ -555,19 +555,19 @@ public class DataFramePivotRestIT extends DataFrameRestTestCase { " }}" + "}"; - createDataframeTransformRequest.setJsonEntity(config); + createTransformRequest.setJsonEntity(config); - Map createDataframeTransformResponse = entityAsMap(client().performRequest(createDataframeTransformRequest)); - assertThat(createDataframeTransformResponse.get("acknowledged"), equalTo(Boolean.TRUE)); + Map createTransformResponse = entityAsMap(client().performRequest(createTransformRequest)); + assertThat(createTransformResponse.get("acknowledged"), equalTo(Boolean.TRUE)); - startAndWaitForTransform(transformId, dataFrameIndex, BASIC_AUTH_VALUE_DATA_FRAME_ADMIN_WITH_SOME_DATA_ACCESS); - assertTrue(indexExists(dataFrameIndex)); + startAndWaitForTransform(transformId, transformIndex, BASIC_AUTH_VALUE_TRANSFORM_ADMIN_WITH_SOME_DATA_ACCESS); + assertTrue(indexExists(transformIndex)); // we expect 21 documents as there shall be 21 days worth of docs - Map indexStats = getAsMap(dataFrameIndex + "/_stats"); + Map indexStats = getAsMap(transformIndex + "/_stats"); assertEquals(21, XContentMapValues.extractValue("_all.total.docs.count", indexStats)); - assertOnePivotValue(dataFrameIndex + "/_search?q=by_day:2017-01-15", 3.82); - Map searchResult = getAsMap(dataFrameIndex + "/_search?q=by_day:2017-01-15"); + assertOnePivotValue(transformIndex + "/_search?q=by_day:2017-01-15", 3.82); + Map searchResult = getAsMap(transformIndex + "/_search?q=by_day:2017-01-15"); String actual = (String) ((List) XContentMapValues.extractValue("hits.hits._source.timestamp", searchResult)).get(0); // Do `containsString` as actual ending timestamp is indeterminate due to how data is generated assertThat(actual, containsString("2017-01-15T")); @@ -575,15 +575,15 @@ public class DataFramePivotRestIT extends DataFrameRestTestCase { public void testPivotWithScriptedMetricAgg() throws Exception { String transformId = "scripted_metric_pivot"; - String dataFrameIndex = "scripted_metric_pivot_reviews"; - setupDataAccessRole(DATA_ACCESS_ROLE, REVIEWS_INDEX_NAME, dataFrameIndex); + String transformIndex = "scripted_metric_pivot_reviews"; + setupDataAccessRole(DATA_ACCESS_ROLE, REVIEWS_INDEX_NAME, transformIndex); - final Request createDataframeTransformRequest = createRequestWithAuth("PUT", DATAFRAME_ENDPOINT + transformId, - BASIC_AUTH_VALUE_DATA_FRAME_ADMIN_WITH_SOME_DATA_ACCESS); + final Request createTransformRequest = createRequestWithAuth("PUT", TRANSFORM_ENDPOINT + transformId, + BASIC_AUTH_VALUE_TRANSFORM_ADMIN_WITH_SOME_DATA_ACCESS); String config = "{" + " \"source\": {\"index\":\"" + REVIEWS_INDEX_NAME + "\"}," - + " \"dest\": {\"index\":\"" + dataFrameIndex + "\"},"; + + " \"dest\": {\"index\":\"" + transformIndex + "\"},"; config += " \"pivot\": {" + " \"group_by\": {" @@ -606,19 +606,19 @@ public class DataFramePivotRestIT extends DataFrameRestTestCase { + " } }" + "}"; - createDataframeTransformRequest.setJsonEntity(config); - Map createDataframeTransformResponse = entityAsMap(client().performRequest(createDataframeTransformRequest)); - assertThat(createDataframeTransformResponse.get("acknowledged"), equalTo(Boolean.TRUE)); + createTransformRequest.setJsonEntity(config); + Map createTransformResponse = entityAsMap(client().performRequest(createTransformRequest)); + assertThat(createTransformResponse.get("acknowledged"), equalTo(Boolean.TRUE)); - startAndWaitForTransform(transformId, dataFrameIndex, BASIC_AUTH_VALUE_DATA_FRAME_ADMIN_WITH_SOME_DATA_ACCESS); - 
assertTrue(indexExists(dataFrameIndex)); + startAndWaitForTransform(transformId, transformIndex, BASIC_AUTH_VALUE_TRANSFORM_ADMIN_WITH_SOME_DATA_ACCESS); + assertTrue(indexExists(transformIndex)); // we expect 27 documents as there shall be 27 user_id's - Map indexStats = getAsMap(dataFrameIndex + "/_stats"); + Map indexStats = getAsMap(transformIndex + "/_stats"); assertEquals(27, XContentMapValues.extractValue("_all.total.docs.count", indexStats)); // get and check some users - Map searchResult = getAsMap(dataFrameIndex + "/_search?q=reviewer:user_4"); + Map searchResult = getAsMap(transformIndex + "/_search?q=reviewer:user_4"); assertEquals(1, XContentMapValues.extractValue("hits.total.value", searchResult)); Number actual = (Number) ((List) XContentMapValues.extractValue("hits.hits._source.avg_rating", searchResult)).get(0); assertEquals(3.878048780, actual.doubleValue(), 0.000001); @@ -628,15 +628,15 @@ public class DataFramePivotRestIT extends DataFrameRestTestCase { public void testPivotWithBucketScriptAgg() throws Exception { String transformId = "bucket_script_pivot"; - String dataFrameIndex = "bucket_script_pivot_reviews"; - setupDataAccessRole(DATA_ACCESS_ROLE, REVIEWS_INDEX_NAME, dataFrameIndex); + String transformIndex = "bucket_script_pivot_reviews"; + setupDataAccessRole(DATA_ACCESS_ROLE, REVIEWS_INDEX_NAME, transformIndex); - final Request createDataframeTransformRequest = createRequestWithAuth("PUT", DATAFRAME_ENDPOINT + transformId, - BASIC_AUTH_VALUE_DATA_FRAME_ADMIN_WITH_SOME_DATA_ACCESS); + final Request createTransformRequest = createRequestWithAuth("PUT", TRANSFORM_ENDPOINT + transformId, + BASIC_AUTH_VALUE_TRANSFORM_ADMIN_WITH_SOME_DATA_ACCESS); String config = "{" + " \"source\": {\"index\":\"" + REVIEWS_INDEX_NAME + "\"}," - + " \"dest\": {\"index\":\"" + dataFrameIndex + "\"},"; + + " \"dest\": {\"index\":\"" + transformIndex + "\"},"; config += " \"pivot\": {" + " \"group_by\": {" @@ -657,19 +657,19 @@ public class DataFramePivotRestIT extends DataFrameRestTestCase { + " } }" + "}"; - createDataframeTransformRequest.setJsonEntity(config); - Map createDataframeTransformResponse = entityAsMap(client().performRequest(createDataframeTransformRequest)); - assertThat(createDataframeTransformResponse.get("acknowledged"), equalTo(Boolean.TRUE)); + createTransformRequest.setJsonEntity(config); + Map createTransformResponse = entityAsMap(client().performRequest(createTransformRequest)); + assertThat(createTransformResponse.get("acknowledged"), equalTo(Boolean.TRUE)); - startAndWaitForTransform(transformId, dataFrameIndex, BASIC_AUTH_VALUE_DATA_FRAME_ADMIN_WITH_SOME_DATA_ACCESS); - assertTrue(indexExists(dataFrameIndex)); + startAndWaitForTransform(transformId, transformIndex, BASIC_AUTH_VALUE_TRANSFORM_ADMIN_WITH_SOME_DATA_ACCESS); + assertTrue(indexExists(transformIndex)); // we expect 27 documents as there shall be 27 user_id's - Map indexStats = getAsMap(dataFrameIndex + "/_stats"); + Map indexStats = getAsMap(transformIndex + "/_stats"); assertEquals(27, XContentMapValues.extractValue("_all.total.docs.count", indexStats)); // get and check some users - Map searchResult = getAsMap(dataFrameIndex + "/_search?q=reviewer:user_4"); + Map searchResult = getAsMap(transformIndex + "/_search?q=reviewer:user_4"); assertEquals(1, XContentMapValues.extractValue("hits.total.value", searchResult)); Number actual = (Number) ((List) XContentMapValues.extractValue("hits.hits._source.avg_rating", searchResult)).get(0); assertEquals(3.878048780, actual.doubleValue(), 0.000001); @@ 
-680,15 +680,15 @@ public class DataFramePivotRestIT extends DataFrameRestTestCase { @SuppressWarnings("unchecked") public void testPivotWithGeoBoundsAgg() throws Exception { String transformId = "geo_bounds_pivot"; - String dataFrameIndex = "geo_bounds_pivot_reviews"; - setupDataAccessRole(DATA_ACCESS_ROLE, REVIEWS_INDEX_NAME, dataFrameIndex); + String transformIndex = "geo_bounds_pivot_reviews"; + setupDataAccessRole(DATA_ACCESS_ROLE, REVIEWS_INDEX_NAME, transformIndex); - final Request createDataframeTransformRequest = createRequestWithAuth("PUT", DATAFRAME_ENDPOINT + transformId, - BASIC_AUTH_VALUE_DATA_FRAME_ADMIN_WITH_SOME_DATA_ACCESS); + final Request createTransformRequest = createRequestWithAuth("PUT", TRANSFORM_ENDPOINT + transformId, + BASIC_AUTH_VALUE_TRANSFORM_ADMIN_WITH_SOME_DATA_ACCESS); String config = "{" + " \"source\": {\"index\":\"" + REVIEWS_INDEX_NAME + "\"}," - + " \"dest\": {\"index\":\"" + dataFrameIndex + "\"},"; + + " \"dest\": {\"index\":\"" + transformIndex + "\"},"; config += " \"pivot\": {" + " \"group_by\": {" @@ -706,19 +706,19 @@ public class DataFramePivotRestIT extends DataFrameRestTestCase { + " } } }" + "}"; - createDataframeTransformRequest.setJsonEntity(config); - Map createDataframeTransformResponse = entityAsMap(client().performRequest(createDataframeTransformRequest)); - assertThat(createDataframeTransformResponse.get("acknowledged"), equalTo(Boolean.TRUE)); + createTransformRequest.setJsonEntity(config); + Map createTransformResponse = entityAsMap(client().performRequest(createTransformRequest)); + assertThat(createTransformResponse.get("acknowledged"), equalTo(Boolean.TRUE)); - startAndWaitForTransform(transformId, dataFrameIndex, BASIC_AUTH_VALUE_DATA_FRAME_ADMIN_WITH_SOME_DATA_ACCESS); - assertTrue(indexExists(dataFrameIndex)); + startAndWaitForTransform(transformId, transformIndex, BASIC_AUTH_VALUE_TRANSFORM_ADMIN_WITH_SOME_DATA_ACCESS); + assertTrue(indexExists(transformIndex)); // we expect 27 documents as there shall be 27 user_id's - Map indexStats = getAsMap(dataFrameIndex + "/_stats"); + Map indexStats = getAsMap(transformIndex + "/_stats"); assertEquals(27, XContentMapValues.extractValue("_all.total.docs.count", indexStats)); // get and check some users - Map searchResult = getAsMap(dataFrameIndex + "/_search?q=reviewer:user_4"); + Map searchResult = getAsMap(transformIndex + "/_search?q=reviewer:user_4"); assertEquals(1, XContentMapValues.extractValue("hits.total.value", searchResult)); Number actual = (Number) ((List) XContentMapValues.extractValue("hits.hits._source.avg_rating", searchResult)).get(0); assertEquals(3.878048780, actual.doubleValue(), 0.000001); @@ -733,15 +733,15 @@ public class DataFramePivotRestIT extends DataFrameRestTestCase { public void testPivotWithGeoCentroidAgg() throws Exception { String transformId = "geo_centroid_pivot"; - String dataFrameIndex = "geo_centroid_pivot_reviews"; - setupDataAccessRole(DATA_ACCESS_ROLE, REVIEWS_INDEX_NAME, dataFrameIndex); + String transformIndex = "geo_centroid_pivot_reviews"; + setupDataAccessRole(DATA_ACCESS_ROLE, REVIEWS_INDEX_NAME, transformIndex); - final Request createDataframeTransformRequest = createRequestWithAuth("PUT", DATAFRAME_ENDPOINT + transformId, - BASIC_AUTH_VALUE_DATA_FRAME_ADMIN_WITH_SOME_DATA_ACCESS); + final Request createTransformRequest = createRequestWithAuth("PUT", TRANSFORM_ENDPOINT + transformId, + BASIC_AUTH_VALUE_TRANSFORM_ADMIN_WITH_SOME_DATA_ACCESS); String config = "{" + " \"source\": {\"index\":\"" + REVIEWS_INDEX_NAME + "\"}," - + " \"dest\": 
{\"index\":\"" + dataFrameIndex + "\"},"; + + " \"dest\": {\"index\":\"" + transformIndex + "\"},"; config += " \"pivot\": {" + " \"group_by\": {" @@ -759,19 +759,19 @@ public class DataFramePivotRestIT extends DataFrameRestTestCase { + " } } }" + "}"; - createDataframeTransformRequest.setJsonEntity(config); - Map createDataframeTransformResponse = entityAsMap(client().performRequest(createDataframeTransformRequest)); - assertThat(createDataframeTransformResponse.get("acknowledged"), equalTo(Boolean.TRUE)); + createTransformRequest.setJsonEntity(config); + Map createTransformResponse = entityAsMap(client().performRequest(createTransformRequest)); + assertThat(createTransformResponse.get("acknowledged"), equalTo(Boolean.TRUE)); - startAndWaitForTransform(transformId, dataFrameIndex, BASIC_AUTH_VALUE_DATA_FRAME_ADMIN_WITH_SOME_DATA_ACCESS); - assertTrue(indexExists(dataFrameIndex)); + startAndWaitForTransform(transformId, transformIndex, BASIC_AUTH_VALUE_TRANSFORM_ADMIN_WITH_SOME_DATA_ACCESS); + assertTrue(indexExists(transformIndex)); // we expect 27 documents as there shall be 27 user_id's - Map indexStats = getAsMap(dataFrameIndex + "/_stats"); + Map indexStats = getAsMap(transformIndex + "/_stats"); assertEquals(27, XContentMapValues.extractValue("_all.total.docs.count", indexStats)); // get and check some users - Map searchResult = getAsMap(dataFrameIndex + "/_search?q=reviewer:user_4"); + Map searchResult = getAsMap(transformIndex + "/_search?q=reviewer:user_4"); assertEquals(1, XContentMapValues.extractValue("hits.total.value", searchResult)); Number actual = (Number) ((List) XContentMapValues.extractValue("hits.hits._source.avg_rating", searchResult)).get(0); assertEquals(3.878048780, actual.doubleValue(), 0.000001); @@ -783,15 +783,15 @@ public class DataFramePivotRestIT extends DataFrameRestTestCase { public void testPivotWithWeightedAvgAgg() throws Exception { String transformId = "weighted_avg_agg_transform"; - String dataFrameIndex = "weighted_avg_pivot_reviews"; - setupDataAccessRole(DATA_ACCESS_ROLE, REVIEWS_INDEX_NAME, dataFrameIndex); + String transformIndex = "weighted_avg_pivot_reviews"; + setupDataAccessRole(DATA_ACCESS_ROLE, REVIEWS_INDEX_NAME, transformIndex); - final Request createDataframeTransformRequest = createRequestWithAuth("PUT", DATAFRAME_ENDPOINT + transformId, - BASIC_AUTH_VALUE_DATA_FRAME_ADMIN_WITH_SOME_DATA_ACCESS); + final Request createTransformRequest = createRequestWithAuth("PUT", TRANSFORM_ENDPOINT + transformId, + BASIC_AUTH_VALUE_TRANSFORM_ADMIN_WITH_SOME_DATA_ACCESS); String config = "{" + " \"source\": {\"index\":\"" + REVIEWS_INDEX_NAME + "\"}," - + " \"dest\": {\"index\":\"" + dataFrameIndex + "\"},"; + + " \"dest\": {\"index\":\"" + transformIndex + "\"},"; config += " \"pivot\": {" + " \"group_by\": {" @@ -807,14 +807,14 @@ public class DataFramePivotRestIT extends DataFrameRestTestCase { + "} } } }" + "}"; - createDataframeTransformRequest.setJsonEntity(config); - Map createDataframeTransformResponse = entityAsMap(client().performRequest(createDataframeTransformRequest)); - assertThat(createDataframeTransformResponse.get("acknowledged"), equalTo(Boolean.TRUE)); + createTransformRequest.setJsonEntity(config); + Map createTransformResponse = entityAsMap(client().performRequest(createTransformRequest)); + assertThat(createTransformResponse.get("acknowledged"), equalTo(Boolean.TRUE)); - startAndWaitForTransform(transformId, dataFrameIndex, BASIC_AUTH_VALUE_DATA_FRAME_ADMIN_WITH_SOME_DATA_ACCESS); - assertTrue(indexExists(dataFrameIndex)); + 
startAndWaitForTransform(transformId, transformIndex, BASIC_AUTH_VALUE_TRANSFORM_ADMIN_WITH_SOME_DATA_ACCESS); + assertTrue(indexExists(transformIndex)); - Map searchResult = getAsMap(dataFrameIndex + "/_search?q=reviewer:user_4"); + Map searchResult = getAsMap(transformIndex + "/_search?q=reviewer:user_4"); assertEquals(1, XContentMapValues.extractValue("hits.total.value", searchResult)); Number actual = (Number) ((List) XContentMapValues.extractValue("hits.hits._source.avg_rating", searchResult)).get(0); assertEquals(4.47169811, actual.doubleValue(), 0.000001); @@ -822,14 +822,14 @@ public class DataFramePivotRestIT extends DataFrameRestTestCase { public void testManyBucketsWithSmallPageSize() throws Exception { String transformId = "test_with_many_buckets"; - String dataFrameIndex = transformId + "-idx"; - setupDataAccessRole(DATA_ACCESS_ROLE, REVIEWS_INDEX_NAME, dataFrameIndex); - final Request createDataframeTransformRequest = createRequestWithAuth("PUT", DATAFRAME_ENDPOINT + transformId, - BASIC_AUTH_VALUE_DATA_FRAME_ADMIN_WITH_SOME_DATA_ACCESS); + String transformIndex = transformId + "-idx"; + setupDataAccessRole(DATA_ACCESS_ROLE, REVIEWS_INDEX_NAME, transformIndex); + final Request createTransformRequest = createRequestWithAuth("PUT", TRANSFORM_ENDPOINT + transformId, + BASIC_AUTH_VALUE_TRANSFORM_ADMIN_WITH_SOME_DATA_ACCESS); String config = "{" + " \"source\": {\"index\":\"" + REVIEWS_INDEX_NAME + "\"}," - + " \"dest\": {\"index\":\"" + dataFrameIndex + "\"}," + + " \"dest\": {\"index\":\"" + transformIndex + "\"}," + " \"pivot\": {" + " \"max_page_search_size\": 10," + " \"group_by\": {" @@ -846,14 +846,14 @@ public class DataFramePivotRestIT extends DataFrameRestTestCase { + " \"field\": \"stars\"" + " } } } }" + "}"; - createDataframeTransformRequest.setJsonEntity(config); - Map createDataframeTransformResponse = entityAsMap(client().performRequest(createDataframeTransformRequest)); - assertThat(createDataframeTransformResponse.get("acknowledged"), equalTo(Boolean.TRUE)); + createTransformRequest.setJsonEntity(config); + Map createTransformResponse = entityAsMap(client().performRequest(createTransformRequest)); + assertThat(createTransformResponse.get("acknowledged"), equalTo(Boolean.TRUE)); - startAndWaitForTransform(transformId, dataFrameIndex, BASIC_AUTH_VALUE_DATA_FRAME_ADMIN_WITH_SOME_DATA_ACCESS); - assertTrue(indexExists(dataFrameIndex)); + startAndWaitForTransform(transformId, transformIndex, BASIC_AUTH_VALUE_TRANSFORM_ADMIN_WITH_SOME_DATA_ACCESS); + assertTrue(indexExists(transformIndex)); - Map stats = getAsMap(DATAFRAME_ENDPOINT + transformId + "/_stats"); + Map stats = getAsMap(TRANSFORM_ENDPOINT + transformId + "/_stats"); assertEquals(101, ((List)XContentMapValues.extractValue("transforms.stats.pages_processed", stats)).get(0)); } diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformProgressIT.java b/x-pack/plugin/transform/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/transform/integration/TransformProgressIT.java similarity index 84% rename from x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformProgressIT.java rename to x-pack/plugin/transform/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/transform/integration/TransformProgressIT.java index 2df5963f264..2cbd29c857c 100644 --- 
a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformProgressIT.java +++ b/x-pack/plugin/transform/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/transform/integration/TransformProgressIT.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.dataframe.integration; +package org.elasticsearch.xpack.transform.integration; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; import org.elasticsearch.action.bulk.BulkRequest; @@ -23,27 +23,27 @@ import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.test.rest.ESRestTestCase; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformProgress; -import org.elasticsearch.xpack.core.dataframe.transforms.DestConfig; -import org.elasticsearch.xpack.core.dataframe.transforms.QueryConfig; -import org.elasticsearch.xpack.core.dataframe.transforms.SourceConfig; -import org.elasticsearch.xpack.core.dataframe.transforms.pivot.AggregationConfig; -import org.elasticsearch.xpack.core.dataframe.transforms.pivot.GroupConfig; -import org.elasticsearch.xpack.core.dataframe.transforms.pivot.HistogramGroupSource; -import org.elasticsearch.xpack.core.dataframe.transforms.pivot.PivotConfig; -import org.elasticsearch.xpack.dataframe.transforms.TransformProgressGatherer; +import org.elasticsearch.xpack.core.transform.transforms.TransformConfig; +import org.elasticsearch.xpack.core.transform.transforms.TransformProgress; +import org.elasticsearch.xpack.core.transform.transforms.DestConfig; +import org.elasticsearch.xpack.core.transform.transforms.QueryConfig; +import org.elasticsearch.xpack.core.transform.transforms.SourceConfig; +import org.elasticsearch.xpack.core.transform.transforms.pivot.AggregationConfig; +import org.elasticsearch.xpack.core.transform.transforms.pivot.GroupConfig; +import org.elasticsearch.xpack.core.transform.transforms.pivot.HistogramGroupSource; +import org.elasticsearch.xpack.core.transform.transforms.pivot.PivotConfig; +import org.elasticsearch.xpack.transform.transforms.TransformProgressGatherer; import java.nio.charset.StandardCharsets; import java.util.Base64; import java.util.Collections; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.elasticsearch.xpack.dataframe.integration.DataFrameRestTestCase.REVIEWS_INDEX_NAME; +import static org.elasticsearch.xpack.transform.integration.TransformRestTestCase.REVIEWS_INDEX_NAME; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; -public class DataFrameTransformProgressIT extends ESRestTestCase { +public class TransformProgressIT extends ESRestTestCase { protected void createReviewsIndex() throws Exception { final int numDocs = 1000; @@ -127,7 +127,7 @@ public class DataFrameTransformProgressIT extends ESRestTestCase { aggs.addAggregator(AggregationBuilders.avg("avg_rating").field("stars")); AggregationConfig aggregationConfig = new AggregationConfig(Collections.emptyMap(), aggs); PivotConfig pivotConfig = new PivotConfig(histgramGroupConfig, aggregationConfig, null); - DataFrameTransformConfig config = new DataFrameTransformConfig("get_progress_transform", + TransformConfig config = new 
TransformConfig("get_progress_transform", sourceConfig, destConfig, null, @@ -141,8 +141,8 @@ public class DataFrameTransformProgressIT extends ESRestTestCase { TransformProgressGatherer.getSearchRequest(config, config.getSource().getQueryConfig().getQuery()), RequestOptions.DEFAULT); - DataFrameTransformProgress progress = - TransformProgressGatherer.searchResponseToDataFrameTransformProgressFunction().apply(response); + TransformProgress progress = + TransformProgressGatherer.searchResponseToTransformProgressFunction().apply(response); assertThat(progress.getTotalDocs(), equalTo(1000L)); assertThat(progress.getDocumentsProcessed(), equalTo(0L)); @@ -152,7 +152,7 @@ public class DataFrameTransformProgressIT extends ESRestTestCase { QueryConfig queryConfig = new QueryConfig(Collections.emptyMap(), QueryBuilders.termQuery("user_id", "user_26")); pivotConfig = new PivotConfig(histgramGroupConfig, aggregationConfig, null); sourceConfig = new SourceConfig(new String[]{REVIEWS_INDEX_NAME}, queryConfig); - config = new DataFrameTransformConfig("get_progress_transform", + config = new TransformConfig("get_progress_transform", sourceConfig, destConfig, null, @@ -163,7 +163,7 @@ public class DataFrameTransformProgressIT extends ESRestTestCase { response = restClient.search(TransformProgressGatherer.getSearchRequest(config, config.getSource().getQueryConfig().getQuery()), RequestOptions.DEFAULT); - progress = TransformProgressGatherer.searchResponseToDataFrameTransformProgressFunction().apply(response); + progress = TransformProgressGatherer.searchResponseToTransformProgressFunction().apply(response); assertThat(progress.getTotalDocs(), equalTo(35L)); assertThat(progress.getDocumentsProcessed(), equalTo(0L)); @@ -172,7 +172,7 @@ public class DataFrameTransformProgressIT extends ESRestTestCase { histgramGroupConfig = new GroupConfig(Collections.emptyMap(), Collections.singletonMap("every_50", new HistogramGroupSource("missing_field", 50.0))); pivotConfig = new PivotConfig(histgramGroupConfig, aggregationConfig, null); - config = new DataFrameTransformConfig("get_progress_transform", + config = new TransformConfig("get_progress_transform", sourceConfig, destConfig, null, @@ -183,7 +183,7 @@ public class DataFrameTransformProgressIT extends ESRestTestCase { response = restClient.search(TransformProgressGatherer.getSearchRequest(config, config.getSource().getQueryConfig().getQuery()), RequestOptions.DEFAULT); - progress = TransformProgressGatherer.searchResponseToDataFrameTransformProgressFunction().apply(response); + progress = TransformProgressGatherer.searchResponseToTransformProgressFunction().apply(response); assertThat(progress.getTotalDocs(), equalTo(0L)); assertThat(progress.getDocumentsProcessed(), equalTo(0L)); diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameRestTestCase.java b/x-pack/plugin/transform/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/transform/integration/TransformRestTestCase.java similarity index 89% rename from x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameRestTestCase.java rename to x-pack/plugin/transform/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/transform/integration/TransformRestTestCase.java index 455009b4969..372efddc57c 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameRestTestCase.java +++ 
b/x-pack/plugin/transform/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/transform/integration/TransformRestTestCase.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.dataframe.integration; +package org.elasticsearch.xpack.transform.integration; import org.apache.http.entity.ContentType; import org.apache.http.entity.StringEntity; @@ -19,8 +19,8 @@ import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.test.rest.ESRestTestCase; -import org.elasticsearch.xpack.core.dataframe.DataFrameField; -import org.elasticsearch.xpack.dataframe.persistence.DataFrameInternalIndex; +import org.elasticsearch.xpack.core.transform.TransformField; +import org.elasticsearch.xpack.transform.persistence.TransformInternalIndex; import org.junit.After; import org.junit.AfterClass; @@ -36,7 +36,7 @@ import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; import static org.hamcrest.Matchers.equalTo; -public abstract class DataFrameRestTestCase extends ESRestTestCase { +public abstract class TransformRestTestCase extends ESRestTestCase { protected static final String TEST_PASSWORD = "x-pack-test-password"; protected static final SecureString TEST_PASSWORD_SECURE_STRING = new SecureString(TEST_PASSWORD.toCharArray()); @@ -44,7 +44,7 @@ public abstract class DataFrameRestTestCase extends ESRestTestCase { protected static final String REVIEWS_INDEX_NAME = "reviews"; - protected static final String DATAFRAME_ENDPOINT = DataFrameField.REST_BASE_PATH + "transforms/"; + protected static final String TRANSFORM_ENDPOINT = TransformField.REST_BASE_PATH + "transforms/"; @Override protected Settings restClientSettings() { @@ -159,7 +159,7 @@ public abstract class DataFrameRestTestCase extends ESRestTestCase { protected void createContinuousPivotReviewsTransform(String transformId, String dataFrameIndex, String authHeader) throws IOException { - final Request createDataframeTransformRequest = createRequestWithAuth("PUT", DATAFRAME_ENDPOINT + transformId, authHeader); + final Request createDataframeTransformRequest = createRequestWithAuth("PUT", TRANSFORM_ENDPOINT + transformId, authHeader); String config = "{ \"dest\": {\"index\":\"" + dataFrameIndex + "\"}," + " \"source\": {\"index\":\"" + REVIEWS_INDEX_NAME + "\"}," @@ -188,7 +188,7 @@ public abstract class DataFrameRestTestCase extends ESRestTestCase { protected void createPivotReviewsTransform(String transformId, String dataFrameIndex, String query, String pipeline, String authHeader) throws IOException { - final Request createDataframeTransformRequest = createRequestWithAuth("PUT", DATAFRAME_ENDPOINT + transformId, authHeader); + final Request createDataframeTransformRequest = createRequestWithAuth("PUT", TRANSFORM_ENDPOINT + transformId, authHeader); String config = "{"; @@ -224,14 +224,13 @@ public abstract class DataFrameRestTestCase extends ESRestTestCase { assertThat(createDataframeTransformResponse.get("acknowledged"), equalTo(Boolean.TRUE)); } - protected void startDataframeTransform(String transformId, boolean force) throws IOException { - startDataframeTransform(transformId, force, null); + protected void startDataframeTransform(String transformId) throws IOException { + 
startDataframeTransform(transformId, null); } - protected void startDataframeTransform(String transformId, boolean force, String authHeader, String... warnings) throws IOException { + protected void startDataframeTransform(String transformId, String authHeader, String... warnings) throws IOException { // start the transform - final Request startTransformRequest = createRequestWithAuth("POST", DATAFRAME_ENDPOINT + transformId + "/_start", authHeader); - startTransformRequest.addParameter(DataFrameField.FORCE.getPreferredName(), Boolean.toString(force)); + final Request startTransformRequest = createRequestWithAuth("POST", TRANSFORM_ENDPOINT + transformId + "/_start", authHeader); if (warnings.length > 0) { startTransformRequest.setOptions(expectWarnings(warnings)); } @@ -239,11 +238,11 @@ public abstract class DataFrameRestTestCase extends ESRestTestCase { assertThat(startTransformResponse.get("acknowledged"), equalTo(Boolean.TRUE)); } - protected void stopDataFrameTransform(String transformId, boolean force) throws Exception { + protected void stopTransform(String transformId, boolean force) throws Exception { // start the transform - final Request stopTransformRequest = createRequestWithAuth("POST", DATAFRAME_ENDPOINT + transformId + "/_stop", null); - stopTransformRequest.addParameter(DataFrameField.FORCE.getPreferredName(), Boolean.toString(force)); - stopTransformRequest.addParameter(DataFrameField.WAIT_FOR_COMPLETION.getPreferredName(), Boolean.toString(true)); + final Request stopTransformRequest = createRequestWithAuth("POST", TRANSFORM_ENDPOINT + transformId + "/_stop", null); + stopTransformRequest.addParameter(TransformField.FORCE.getPreferredName(), Boolean.toString(force)); + stopTransformRequest.addParameter(TransformField.WAIT_FOR_COMPLETION.getPreferredName(), Boolean.toString(true)); Map stopTransformResponse = entityAsMap(client().performRequest(stopTransformRequest)); assertThat(stopTransformResponse.get("acknowledged"), equalTo(Boolean.TRUE)); } @@ -259,7 +258,7 @@ public abstract class DataFrameRestTestCase extends ESRestTestCase { protected void startAndWaitForTransform(String transformId, String dataFrameIndex, String authHeader, String... 
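Taken together, the helpers above are typically chained start, wait, refresh, stop by the integration tests. A minimal usage sketch under that assumption (the method name and the checkpoint value are illustrative):

    // Minimal sketch of chaining the helpers defined in this base class.
    protected void runToFirstCheckpoint(String transformId, String destIndex) throws Exception {
        startDataframeTransform(transformId);          // POST <TRANSFORM_ENDPOINT><id>/_start
        assertTrue(indexExists(destIndex));            // the tests expect the dest index right after start
        waitForTransformCheckpoint(transformId, 1L);   // poll _stats until checkpoint 1 is reached
        refreshIndex(destIndex);                       // make the transformed documents searchable
        stopTransform(transformId, false);             // _stop with force=false, wait_for_completion=true
    }
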
warnings) throws Exception { // start the transform - startDataframeTransform(transformId, false, authHeader, warnings); + startDataframeTransform(transformId, authHeader, warnings); assertTrue(indexExists(dataFrameIndex)); // wait until the dataframe has been created and all data is available waitForDataFrameCheckpoint(transformId); @@ -279,10 +278,10 @@ public abstract class DataFrameRestTestCase extends ESRestTestCase { String authHeader, long checkpoint) throws Exception { // start the transform - startDataframeTransform(transformId, false, authHeader, new String[0]); + startDataframeTransform(transformId, authHeader, new String[0]); assertTrue(indexExists(dataFrameIndex)); // wait until the dataframe has been created and all data is available - waitForDataFrameCheckpoint(transformId, checkpoint); + waitForTransformCheckpoint(transformId, checkpoint); refreshIndex(dataFrameIndex); } @@ -305,10 +304,10 @@ public abstract class DataFrameRestTestCase extends ESRestTestCase { } void waitForDataFrameCheckpoint(String transformId) throws Exception { - waitForDataFrameCheckpoint(transformId, 1L); + waitForTransformCheckpoint(transformId, 1L); } - void waitForDataFrameCheckpoint(String transformId, long checkpoint) throws Exception { + void waitForTransformCheckpoint(String transformId, long checkpoint) throws Exception { assertBusy(() -> assertEquals(checkpoint, getDataFrameCheckpoint(transformId)), 30, TimeUnit.SECONDS); } @@ -318,7 +317,7 @@ public abstract class DataFrameRestTestCase extends ESRestTestCase { @SuppressWarnings("unchecked") private static List> getDataFrameTransforms() throws IOException { - Response response = adminClient().performRequest(new Request("GET", DATAFRAME_ENDPOINT + "_all")); + Response response = adminClient().performRequest(new Request("GET", TRANSFORM_ENDPOINT + "_all")); Map transforms = entityAsMap(response); List> transformConfigs = (List>) XContentMapValues.extractValue("transforms", transforms); @@ -331,7 +330,7 @@ public abstract class DataFrameRestTestCase extends ESRestTestCase { } protected static Map getDataFrameState(String transformId) throws IOException { - Response statsResponse = client().performRequest(new Request("GET", DATAFRAME_ENDPOINT + transformId + "/_stats")); + Response statsResponse = client().performRequest(new Request("GET", TRANSFORM_ENDPOINT + transformId + "/_stats")); List transforms = ((List) entityAsMap(statsResponse).get("transforms")); if (transforms.isEmpty()) { return null; @@ -339,15 +338,15 @@ public abstract class DataFrameRestTestCase extends ESRestTestCase { return (Map) transforms.get(0); } - protected static void deleteDataFrameTransform(String transformId) throws IOException { - Request request = new Request("DELETE", DATAFRAME_ENDPOINT + transformId); + protected static void deleteTransform(String transformId) throws IOException { + Request request = new Request("DELETE", TRANSFORM_ENDPOINT + transformId); request.addParameter("ignore", "404"); // Ignore 404s because they imply someone was racing us to delete this adminClient().performRequest(request); } @After public void waitForDataFrame() throws Exception { - wipeDataFrameTransforms(); + wipeTransforms(); waitForPendingDataFrameTasks(); } @@ -358,11 +357,11 @@ public abstract class DataFrameRestTestCase extends ESRestTestCase { wipeAllIndices(); } - public void wipeDataFrameTransforms() throws IOException { + public void wipeTransforms() throws IOException { List> transformConfigs = getDataFrameTransforms(); for (Map transformConfig : transformConfigs) { 
String transformId = (String) transformConfig.get("id"); - Request request = new Request("POST", DATAFRAME_ENDPOINT + transformId + "/_stop"); + Request request = new Request("POST", TRANSFORM_ENDPOINT + transformId + "/_stop"); request.addParameter("wait_for_completion", "true"); request.addParameter("timeout", "10s"); request.addParameter("ignore", "404"); @@ -377,7 +376,7 @@ public abstract class DataFrameRestTestCase extends ESRestTestCase { for (Map transformConfig : transformConfigs) { String transformId = (String) transformConfig.get("id"); - deleteDataFrameTransform(transformId); + deleteTransform(transformId); } // transforms should be all gone @@ -385,7 +384,7 @@ public abstract class DataFrameRestTestCase extends ESRestTestCase { assertTrue(transformConfigs.isEmpty()); // the configuration index should be empty - Request request = new Request("GET", DataFrameInternalIndex.LATEST_INDEX_NAME + "/_search"); + Request request = new Request("GET", TransformInternalIndex.LATEST_INDEX_NAME + "/_search"); try { Response searchResponse = adminClient().performRequest(request); Map searchResult = entityAsMap(searchResponse); @@ -400,11 +399,11 @@ public abstract class DataFrameRestTestCase extends ESRestTestCase { } protected static void waitForPendingDataFrameTasks() throws Exception { - waitForPendingTasks(adminClient(), taskName -> taskName.startsWith(DataFrameField.TASK_NAME) == false); + waitForPendingTasks(adminClient(), taskName -> taskName.startsWith(TransformField.TASK_NAME) == false); } static int getDataFrameCheckpoint(String transformId) throws IOException { - Response statsResponse = client().performRequest(new Request("GET", DATAFRAME_ENDPOINT + transformId + "/_stats")); + Response statsResponse = client().performRequest(new Request("GET", TRANSFORM_ENDPOINT + transformId + "/_stats")); Map transformStatsAsMap = (Map) ((List) entityAsMap(statsResponse).get("transforms")).get(0); return (int) XContentMapValues.extractValue("checkpointing.last.checkpoint", transformStatsAsMap); diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTaskFailedStateIT.java b/x-pack/plugin/transform/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/transform/integration/TransformTaskFailedStateIT.java similarity index 69% rename from x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTaskFailedStateIT.java rename to x-pack/plugin/transform/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/transform/integration/TransformTaskFailedStateIT.java index edd8eb44a9f..a9f8a9bc963 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTaskFailedStateIT.java +++ b/x-pack/plugin/transform/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/transform/integration/TransformTaskFailedStateIT.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.dataframe.integration; +package org.elasticsearch.xpack.transform.integration; import org.apache.http.entity.ContentType; import org.apache.http.entity.StringEntity; @@ -14,7 +14,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformStats; +import org.elasticsearch.xpack.core.transform.transforms.TransformStats; import org.junit.After; import org.junit.Before; @@ -28,10 +28,8 @@ import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.CoreMatchers.nullValue; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThanOrEqualTo; -import static org.hamcrest.Matchers.oneOf; -public class DataFrameTaskFailedStateIT extends DataFrameRestTestCase { +public class TransformTaskFailedStateIT extends TransformRestTestCase { private final List failureTransforms = new ArrayList<>(); @Before @@ -41,10 +39,10 @@ public class DataFrameTaskFailedStateIT extends DataFrameRestTestCase { // see: https://github.com/elastic/elasticsearch/issues/45562 Request addFailureRetrySetting = new Request("PUT", "/_cluster/settings"); addFailureRetrySetting.setJsonEntity( - "{\"transient\": {\"xpack.data_frame.num_transform_failure_retries\": \"" + 0 + "\"," + + "{\"transient\": {\"xpack.transform.num_transform_failure_retries\": \"" + 0 + "\"," + "\"logger.org.elasticsearch.action.bulk\": \"info\"," + // reduces bulk failure spam "\"logger.org.elasticsearch.xpack.core.indexing.AsyncTwoPhaseIndexer\": \"trace\"," + - "\"logger.org.elasticsearch.xpack.dataframe\": \"trace\"}}"); + "\"logger.org.elasticsearch.xpack.transform\": \"trace\"}}"); client().performRequest(addFailureRetrySetting); } @@ -53,20 +51,20 @@ public class DataFrameTaskFailedStateIT extends DataFrameRestTestCase { // If the tests failed in the middle, we should force stop it. This prevents other transform tests from failing due // to this left over transform for (String transformId : failureTransforms) { - stopDataFrameTransform(transformId, true); - deleteDataFrameTransform(transformId); + stopTransform(transformId, true); + deleteTransform(transformId); } } public void testForceStopFailedTransform() throws Exception { String transformId = "test-force-stop-failed-transform"; createReviewsIndex(REVIEWS_INDEX_NAME, 10); - String dataFrameIndex = "failure_pivot_reviews"; - createDestinationIndexWithBadMapping(dataFrameIndex); - createContinuousPivotReviewsTransform(transformId, dataFrameIndex, null); + String transformIndex = "failure_pivot_reviews"; + createDestinationIndexWithBadMapping(transformIndex); + createContinuousPivotReviewsTransform(transformId, transformIndex, null); failureTransforms.add(transformId); - startDataframeTransform(transformId, false); - awaitState(transformId, DataFrameTransformStats.State.FAILED); + startDataframeTransform(transformId); + awaitState(transformId, TransformStats.State.FAILED); Map fullState = getDataFrameState(transformId); final String failureReason = "task encountered more than 0 failures; latest failure: " + "Bulk index experienced failures. 
See the logs of the node running the transform for details."; @@ -74,62 +72,51 @@ public class DataFrameTaskFailedStateIT extends DataFrameRestTestCase { assertThat(XContentMapValues.extractValue("reason", fullState), equalTo(failureReason)); // verify that we cannot stop a failed transform - ResponseException ex = expectThrows(ResponseException.class, () -> stopDataFrameTransform(transformId, false)); + ResponseException ex = expectThrows(ResponseException.class, () -> stopTransform(transformId, false)); assertThat(ex.getResponse().getStatusLine().getStatusCode(), equalTo(RestStatus.CONFLICT.getStatus())); assertThat(XContentMapValues.extractValue("error.reason", entityAsMap(ex.getResponse())), - equalTo("Unable to stop data frame transform [test-force-stop-failed-transform] as it is in a failed state with reason [" + + equalTo("Unable to stop transform [test-force-stop-failed-transform] as it is in a failed state with reason [" + failureReason + - "]. Use force stop to stop the data frame transform.")); + "]. Use force stop to stop the transform.")); // Verify that we can force stop a failed transform - stopDataFrameTransform(transformId, true); + stopTransform(transformId, true); - awaitState(transformId, DataFrameTransformStats.State.STOPPED); + awaitState(transformId, TransformStats.State.STOPPED); fullState = getDataFrameState(transformId); assertThat(XContentMapValues.extractValue("reason", fullState), is(nullValue())); } - public void testForceStartFailedTransform() throws Exception { + public void testStartFailedTransform() throws Exception { String transformId = "test-force-start-failed-transform"; createReviewsIndex(REVIEWS_INDEX_NAME, 10); String dataFrameIndex = "failure_pivot_reviews"; createDestinationIndexWithBadMapping(dataFrameIndex); createContinuousPivotReviewsTransform(transformId, dataFrameIndex, null); failureTransforms.add(transformId); - startDataframeTransform(transformId, false); - awaitState(transformId, DataFrameTransformStats.State.FAILED); + startDataframeTransform(transformId); + awaitState(transformId, TransformStats.State.FAILED); Map fullState = getDataFrameState(transformId); final String failureReason = "task encountered more than 0 failures; latest failure: " + "Bulk index experienced failures. See the logs of the node running the transform for details."; // Verify we have failed for the expected reason assertThat(XContentMapValues.extractValue("reason", fullState), equalTo(failureReason)); - final String expectedFailure = "Unable to start data frame transform [test-force-start-failed-transform] " + + final String expectedFailure = "Unable to start transform [test-force-start-failed-transform] " + "as it is in a failed state with failure: [" + failureReason + - "]. Use force start to restart data frame transform once error is resolved."; + "]. 
Use force stop and then restart the transform once error is resolved."; // Verify that we cannot start the transform when the task is in a failed state assertBusy(() -> { - ResponseException ex = expectThrows(ResponseException.class, () -> startDataframeTransform(transformId, false)); + ResponseException ex = expectThrows(ResponseException.class, () -> startDataframeTransform(transformId)); assertThat(ex.getResponse().getStatusLine().getStatusCode(), equalTo(RestStatus.CONFLICT.getStatus())); assertThat(XContentMapValues.extractValue("error.reason", entityAsMap(ex.getResponse())), equalTo(expectedFailure)); }, 60, TimeUnit.SECONDS); - // Correct the failure by deleting the destination index - deleteIndex(dataFrameIndex); - // Force start the data frame to indicate failure correction - startDataframeTransform(transformId, true); - - // Verify that we have started and that our reason is cleared - fullState = getDataFrameState(transformId); - assertThat(XContentMapValues.extractValue("reason", fullState), is(nullValue())); - assertThat(XContentMapValues.extractValue("state", fullState), oneOf("started", "indexing")); - assertThat((Integer)XContentMapValues.extractValue("stats.index_failures", fullState), greaterThanOrEqualTo(1)); - - stopDataFrameTransform(transformId, true); + stopTransform(transformId, true); } - private void awaitState(String transformId, DataFrameTransformStats.State state) throws Exception { + private void awaitState(String transformId, TransformStats.State state) throws Exception { assertBusy(() -> { String currentState = getDataFrameTransformState(transformId); assertThat(currentState, equalTo(state.value())); diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameUsageIT.java b/x-pack/plugin/transform/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/transform/integration/TransformUsageIT.java similarity index 61% rename from x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameUsageIT.java rename to x-pack/plugin/transform/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/transform/integration/TransformUsageIT.java index e936606d127..3c45bb34512 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameUsageIT.java +++ b/x-pack/plugin/transform/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/transform/integration/TransformUsageIT.java @@ -4,15 +4,14 @@ * you may not use this file except in compliance with the Elastic License. 
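The failed-state test above establishes that a plain stop of a failed transform returns 409 CONFLICT and that only a force stop clears the failure. A small sketch of the client-side handling this implies, assuming the same TransformRestTestCase helpers (the wrapper name is illustrative):

    // Minimal sketch; a FAILED transform rejects a plain stop with 409 CONFLICT, as asserted
    // above, while a force stop succeeds and clears the recorded failure reason.
    private void stopEvenIfFailed(String transformId) throws Exception {
        try {
            stopTransform(transformId, false);          // force = false
        } catch (ResponseException e) {
            if (e.getResponse().getStatusLine().getStatusCode() == RestStatus.CONFLICT.getStatus()) {
                stopTransform(transformId, true);       // force = true, as in the @After cleanup
            } else {
                throw e;
            }
        }
    }
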
*/ -package org.elasticsearch.xpack.dataframe.integration; +package org.elasticsearch.xpack.transform.integration; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.common.xcontent.support.XContentMapValues; - -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerTransformStats; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformStoredDoc; -import org.elasticsearch.xpack.dataframe.persistence.DataFrameInternalIndex; +import org.elasticsearch.xpack.core.transform.transforms.TransformIndexerStats; +import org.elasticsearch.xpack.core.transform.transforms.TransformStoredDoc; +import org.elasticsearch.xpack.transform.persistence.TransformInternalIndex; import org.junit.Before; import java.io.IOException; @@ -21,10 +20,10 @@ import java.util.List; import java.util.Map; import java.util.concurrent.TimeUnit; -import static org.elasticsearch.xpack.core.dataframe.DataFrameField.INDEX_DOC_TYPE; -import static org.elasticsearch.xpack.dataframe.DataFrameFeatureSet.PROVIDED_STATS; +import static org.elasticsearch.xpack.core.transform.TransformField.INDEX_DOC_TYPE; +import static org.elasticsearch.xpack.transform.TransformFeatureSet.PROVIDED_STATS; -public class DataFrameUsageIT extends DataFrameRestTestCase { +public class TransformUsageIT extends TransformRestTestCase { @Before public void createIndexes() throws IOException { @@ -35,11 +34,11 @@ public class DataFrameUsageIT extends DataFrameRestTestCase { Response usageResponse = client().performRequest(new Request("GET", "_xpack/usage")); Map usageAsMap = entityAsMap(usageResponse); - assertTrue((boolean) XContentMapValues.extractValue("data_frame.available", usageAsMap)); - assertTrue((boolean) XContentMapValues.extractValue("data_frame.enabled", usageAsMap)); + assertTrue((boolean) XContentMapValues.extractValue("transform.available", usageAsMap)); + assertTrue((boolean) XContentMapValues.extractValue("transform.enabled", usageAsMap)); // no transforms, no stats - assertEquals(null, XContentMapValues.extractValue("data_frame.transforms", usageAsMap)); - assertEquals(null, XContentMapValues.extractValue("data_frame.stats", usageAsMap)); + assertEquals(null, XContentMapValues.extractValue("transform.transforms", usageAsMap)); + assertEquals(null, XContentMapValues.extractValue("transform.stats", usageAsMap)); // create transforms createPivotReviewsTransform("test_usage", "pivot_reviews", null); @@ -47,16 +46,16 @@ public class DataFrameUsageIT extends DataFrameRestTestCase { createContinuousPivotReviewsTransform("test_usage_continuous", "pivot_reviews_continuous", null); usageResponse = client().performRequest(new Request("GET", "_xpack/usage")); usageAsMap = entityAsMap(usageResponse); - assertEquals(3, XContentMapValues.extractValue("data_frame.transforms._all", usageAsMap)); - assertEquals(3, XContentMapValues.extractValue("data_frame.transforms.stopped", usageAsMap)); + assertEquals(3, XContentMapValues.extractValue("transform.transforms._all", usageAsMap)); + assertEquals(3, XContentMapValues.extractValue("transform.transforms.stopped", usageAsMap)); startAndWaitForTransform("test_usage", "pivot_reviews"); - stopDataFrameTransform("test_usage", false); + stopTransform("test_usage", false); Request statsExistsRequest = new Request("GET", - DataFrameInternalIndex.LATEST_INDEX_NAME+"/_search?q=" + + TransformInternalIndex.LATEST_INDEX_NAME+"/_search?q=" + INDEX_DOC_TYPE.getPreferredName() + ":" + - DataFrameTransformStoredDoc.NAME); + 
TransformStoredDoc.NAME); // Verify that we have one stat document assertBusy(() -> { Map hasStatsMap = entityAsMap(client().performRequest(statsExistsRequest)); @@ -65,7 +64,7 @@ public class DataFrameUsageIT extends DataFrameRestTestCase { startAndWaitForContinuousTransform("test_usage_continuous", "pivot_reviews_continuous", null); - Request getRequest = new Request("GET", DATAFRAME_ENDPOINT + "test_usage/_stats"); + Request getRequest = new Request("GET", TRANSFORM_ENDPOINT + "test_usage/_stats"); Map stats = entityAsMap(client().performRequest(getRequest)); Map expectedStats = new HashMap<>(); for(String statName : PROVIDED_STATS) { @@ -83,29 +82,29 @@ public class DataFrameUsageIT extends DataFrameRestTestCase { Response response = client().performRequest(new Request("GET", "_xpack/usage")); Map statsMap = entityAsMap(response); // we should see some stats - assertEquals(3, XContentMapValues.extractValue("data_frame.transforms._all", statsMap)); - assertEquals(2, XContentMapValues.extractValue("data_frame.transforms.stopped", statsMap)); - assertEquals(1, XContentMapValues.extractValue("data_frame.transforms.started", statsMap)); + assertEquals(3, XContentMapValues.extractValue("transform.transforms._all", statsMap)); + assertEquals(2, XContentMapValues.extractValue("transform.transforms.stopped", statsMap)); + assertEquals(1, XContentMapValues.extractValue("transform.transforms.started", statsMap)); for(String statName : PROVIDED_STATS) { - if (statName.equals(DataFrameIndexerTransformStats.INDEX_TIME_IN_MS.getPreferredName()) - ||statName.equals(DataFrameIndexerTransformStats.SEARCH_TIME_IN_MS.getPreferredName())) { + if (statName.equals(TransformIndexerStats.INDEX_TIME_IN_MS.getPreferredName()) + ||statName.equals(TransformIndexerStats.SEARCH_TIME_IN_MS.getPreferredName())) { continue; } assertEquals("Incorrect stat " + statName, expectedStats.get(statName) * 2, - XContentMapValues.extractValue("data_frame.stats." + statName, statsMap)); + XContentMapValues.extractValue("transform.stats." + statName, statsMap)); } // Refresh the index so that statistics are searchable - refreshIndex(DataFrameInternalIndex.LATEST_INDEX_VERSIONED_NAME); + refreshIndex(TransformInternalIndex.LATEST_INDEX_VERSIONED_NAME); }, 60, TimeUnit.SECONDS); - stopDataFrameTransform("test_usage_continuous", false); + stopTransform("test_usage_continuous", false); usageResponse = client().performRequest(new Request("GET", "_xpack/usage")); usageAsMap = entityAsMap(usageResponse); - assertEquals(3, XContentMapValues.extractValue("data_frame.transforms._all", usageAsMap)); - assertEquals(3, XContentMapValues.extractValue("data_frame.transforms.stopped", usageAsMap)); + assertEquals(3, XContentMapValues.extractValue("transform.transforms._all", usageAsMap)); + assertEquals(3, XContentMapValues.extractValue("transform.transforms.stopped", usageAsMap)); } } diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/Transform.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/Transform.java new file mode 100644 index 00000000000..5c0a637dbed --- /dev/null +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/Transform.java @@ -0,0 +1,252 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
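The usage assertions above move every counter from data_frame.* to transform.*. A minimal sketch of reading the renamed section, assuming a subclass of TransformRestTestCase (the method name is illustrative; any stat listed in PROVIDED_STATS can be read the same way under transform.stats):

    // Minimal sketch; uses client(), entityAsMap(...) and XContentMapValues as the test above does.
    private int numberOfTransforms() throws IOException {
        Response usageResponse = client().performRequest(new Request("GET", "_xpack/usage"));
        Map<String, Object> usageAsMap = entityAsMap(usageResponse);
        // The data_frame.* keys are gone; the counters now live under transform.*
        assertTrue((boolean) XContentMapValues.extractValue("transform.enabled", usageAsMap));
        return (int) XContentMapValues.extractValue("transform.transforms._all", usageAsMap);
    }
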
+ */ + +package org.elasticsearch.xpack.transform; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.lucene.util.SetOnce; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.IndexTemplateMetaData; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Module; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.IndexScopedSettings; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsFilter; +import org.elasticsearch.common.settings.SettingsModule; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.NamedXContentRegistry.Entry; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.persistent.PersistentTasksExecutor; +import org.elasticsearch.plugins.ActionPlugin; +import org.elasticsearch.plugins.PersistentTaskPlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestHandler; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.threadpool.ExecutorBuilder; +import org.elasticsearch.threadpool.FixedExecutorBuilder; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.watcher.ResourceWatcherService; +import org.elasticsearch.xpack.core.XPackPlugin; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.scheduler.SchedulerEngine; +import org.elasticsearch.xpack.core.transform.TransformNamedXContentProvider; +import org.elasticsearch.xpack.core.transform.action.DeleteTransformAction; +import org.elasticsearch.xpack.core.transform.action.GetTransformsAction; +import org.elasticsearch.xpack.core.transform.action.GetTransformsStatsAction; +import org.elasticsearch.xpack.core.transform.action.PreviewTransformAction; +import org.elasticsearch.xpack.core.transform.action.PutTransformAction; +import org.elasticsearch.xpack.core.transform.action.StartTransformAction; +import org.elasticsearch.xpack.core.transform.action.StopTransformAction; +import org.elasticsearch.xpack.core.transform.action.UpdateTransformAction; +import org.elasticsearch.xpack.transform.action.TransportDeleteTransformAction; +import org.elasticsearch.xpack.transform.action.TransportGetTransformsAction; +import org.elasticsearch.xpack.transform.action.TransportGetTransformsStatsAction; +import org.elasticsearch.xpack.transform.action.TransportPreviewTransformAction; +import org.elasticsearch.xpack.transform.action.TransportPutTransformAction; +import org.elasticsearch.xpack.transform.action.TransportStartTransformAction; +import org.elasticsearch.xpack.transform.action.TransportStopTransformAction; +import org.elasticsearch.xpack.transform.action.TransportUpdateTransformAction; +import org.elasticsearch.xpack.transform.checkpoint.TransformCheckpointService; +import org.elasticsearch.xpack.transform.notifications.TransformAuditor; +import 
org.elasticsearch.xpack.transform.persistence.TransformInternalIndex; +import org.elasticsearch.xpack.transform.persistence.TransformConfigManager; +import org.elasticsearch.xpack.transform.rest.action.RestDeleteTransformAction; +import org.elasticsearch.xpack.transform.rest.action.RestGetTransformAction; +import org.elasticsearch.xpack.transform.rest.action.RestGetTransformStatsAction; +import org.elasticsearch.xpack.transform.rest.action.RestPreviewTransformAction; +import org.elasticsearch.xpack.transform.rest.action.RestPutTransformAction; +import org.elasticsearch.xpack.transform.rest.action.RestStartTransformAction; +import org.elasticsearch.xpack.transform.rest.action.RestStopTransformAction; +import org.elasticsearch.xpack.transform.rest.action.RestUpdateTransformAction; +import org.elasticsearch.xpack.transform.transforms.TransformPersistentTasksExecutor; +import org.elasticsearch.xpack.transform.transforms.TransformTask; + +import java.io.IOException; +import java.time.Clock; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.function.Supplier; +import java.util.function.UnaryOperator; + +import static java.util.Collections.emptyList; + +public class Transform extends Plugin implements ActionPlugin, PersistentTaskPlugin { + + public static final String NAME = "transform"; + public static final String TASK_THREAD_POOL_NAME = "transform_indexing"; + + private static final Logger logger = LogManager.getLogger(Transform.class); + + private final boolean enabled; + private final Settings settings; + private final boolean transportClientMode; + private final SetOnce transformConfigManager = new SetOnce<>(); + private final SetOnce transformAuditor = new SetOnce<>(); + private final SetOnce transformCheckpointService = new SetOnce<>(); + private final SetOnce schedulerEngine = new SetOnce<>(); + + public Transform(Settings settings) { + this.settings = settings; + this.enabled = XPackSettings.TRANSFORM_ENABLED.get(settings); + this.transportClientMode = XPackPlugin.transportClientMode(settings); + } + + @Override + public Collection createGuiceModules() { + List modules = new ArrayList<>(); + + if (transportClientMode) { + return modules; + } + + modules.add(b -> XPackPlugin.bindFeatureSet(b, TransformFeatureSet.class)); + return modules; + } + + protected XPackLicenseState getLicenseState() { return XPackPlugin.getSharedLicenseState(); } + + @Override + public List getRestHandlers(final Settings settings, final RestController restController, + final ClusterSettings clusterSettings, final IndexScopedSettings indexScopedSettings, final SettingsFilter settingsFilter, + final IndexNameExpressionResolver indexNameExpressionResolver, final Supplier nodesInCluster) { + + if (!enabled) { + return emptyList(); + } + + return Arrays.asList( + new RestPutTransformAction(restController), + new RestStartTransformAction(restController), + new RestStopTransformAction(restController), + new RestDeleteTransformAction(restController), + new RestGetTransformAction(restController), + new RestGetTransformStatsAction(restController), + new RestPreviewTransformAction(restController), + new RestUpdateTransformAction(restController) + ); + } + + @Override + public List> getActions() { + if (!enabled) { + return emptyList(); + } + + return Arrays.asList( + new ActionHandler<>(PutTransformAction.INSTANCE, TransportPutTransformAction.class), + new 
ActionHandler<>(StartTransformAction.INSTANCE, TransportStartTransformAction.class), + new ActionHandler<>(StopTransformAction.INSTANCE, TransportStopTransformAction.class), + new ActionHandler<>(DeleteTransformAction.INSTANCE, TransportDeleteTransformAction.class), + new ActionHandler<>(GetTransformsAction.INSTANCE, TransportGetTransformsAction.class), + new ActionHandler<>(GetTransformsStatsAction.INSTANCE, TransportGetTransformsStatsAction.class), + new ActionHandler<>(PreviewTransformAction.INSTANCE, TransportPreviewTransformAction.class), + new ActionHandler<>(UpdateTransformAction.INSTANCE, TransportUpdateTransformAction.class) + ); + } + + @Override + public List> getExecutorBuilders(Settings settings) { + if (false == enabled || transportClientMode) { + return emptyList(); + } + + FixedExecutorBuilder indexing = new FixedExecutorBuilder(settings, TASK_THREAD_POOL_NAME, 4, 4, + "data_frame.task_thread_pool"); + + return Collections.singletonList(indexing); + } + + @Override + public Collection createComponents(Client client, ClusterService clusterService, ThreadPool threadPool, + ResourceWatcherService resourceWatcherService, ScriptService scriptService, NamedXContentRegistry xContentRegistry, + Environment environment, NodeEnvironment nodeEnvironment, NamedWriteableRegistry namedWriteableRegistry) { + if (enabled == false || transportClientMode) { + return emptyList(); + } + transformAuditor.set(new TransformAuditor(client, clusterService.getNodeName())); + transformConfigManager.set(new TransformConfigManager(client, xContentRegistry)); + transformCheckpointService.set(new TransformCheckpointService(client, + transformConfigManager.get(), + transformAuditor.get())); + + return Arrays.asList(transformConfigManager.get(), transformAuditor.get(), transformCheckpointService.get()); + } + + @Override + public UnaryOperator> getIndexTemplateMetaDataUpgrader() { + return templates -> { + try { + templates.put(TransformInternalIndex.LATEST_INDEX_VERSIONED_NAME, TransformInternalIndex.getIndexTemplateMetaData()); + } catch (IOException e) { + logger.error("Error creating data frame index template", e); + } + try { + templates.put(TransformInternalIndex.AUDIT_INDEX, TransformInternalIndex.getAuditIndexTemplateMetaData()); + } catch (IOException e) { + logger.warn("Error creating data frame audit index", e); + } + return templates; + }; + } + + @Override + public List> getPersistentTasksExecutor(ClusterService clusterService, ThreadPool threadPool, + Client client, SettingsModule settingsModule) { + if (enabled == false || transportClientMode) { + return emptyList(); + } + + schedulerEngine.set(new SchedulerEngine(settings, Clock.systemUTC())); + + // the transforms config manager should have been created + assert transformConfigManager.get() != null; + // the auditor should have been created + assert transformAuditor.get() != null; + assert transformCheckpointService.get() != null; + + return Collections.singletonList( + new TransformPersistentTasksExecutor(client, + transformConfigManager.get(), + transformCheckpointService.get(), + schedulerEngine.get(), + transformAuditor.get(), + threadPool, + clusterService, + settingsModule.getSettings())); + } + + @Override + public List> getSettings() { + return Collections.singletonList(TransformTask.NUM_FAILURE_RETRIES_SETTING); + } + + @Override + public void close() { + if (schedulerEngine.get() != null) { + schedulerEngine.get().stop(); + } + } + + @Override + public List getNamedXContent() { + return new 
TransformNamedXContentProvider().getNamedXContentParsers(); + } +} diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/DataFrameFeatureSet.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/TransformFeatureSet.java similarity index 66% rename from x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/DataFrameFeatureSet.java rename to x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/TransformFeatureSet.java index 1e595b71502..bb48c563ac4 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/DataFrameFeatureSet.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/TransformFeatureSet.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.dataframe; +package org.elasticsearch.xpack.transform; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -29,15 +29,15 @@ import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.XPackFeatureSet; import org.elasticsearch.xpack.core.XPackField; import org.elasticsearch.xpack.core.XPackSettings; -import org.elasticsearch.xpack.core.dataframe.DataFrameFeatureSetUsage; -import org.elasticsearch.xpack.core.dataframe.DataFrameField; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerTransformStats; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransform; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformState; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformStoredDoc; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformTaskState; -import org.elasticsearch.xpack.dataframe.persistence.DataFrameInternalIndex; +import org.elasticsearch.xpack.core.transform.TransformFeatureSetUsage; +import org.elasticsearch.xpack.core.transform.TransformField; +import org.elasticsearch.xpack.core.transform.transforms.TransformConfig; +import org.elasticsearch.xpack.core.transform.transforms.TransformIndexerStats; +import org.elasticsearch.xpack.core.transform.transforms.TransformState; +import org.elasticsearch.xpack.core.transform.transforms.TransformStoredDoc; +import org.elasticsearch.xpack.core.transform.transforms.TransformTaskParams; +import org.elasticsearch.xpack.core.transform.transforms.TransformTaskState; +import org.elasticsearch.xpack.transform.persistence.TransformInternalIndex; import java.util.ArrayList; import java.util.Arrays; @@ -49,31 +49,31 @@ import java.util.Map; import java.util.Objects; -public class DataFrameFeatureSet implements XPackFeatureSet { +public class TransformFeatureSet implements XPackFeatureSet { private final boolean enabled; private final Client client; private final XPackLicenseState licenseState; private final ClusterService clusterService; - private static final Logger logger = LogManager.getLogger(DataFrameFeatureSet.class); + private static final Logger logger = LogManager.getLogger(TransformFeatureSet.class); public static final String[] PROVIDED_STATS = new String[] { - DataFrameIndexerTransformStats.NUM_PAGES.getPreferredName(), - DataFrameIndexerTransformStats.NUM_INPUT_DOCUMENTS.getPreferredName(), - DataFrameIndexerTransformStats.NUM_OUTPUT_DOCUMENTS.getPreferredName(), - 
DataFrameIndexerTransformStats.NUM_INVOCATIONS.getPreferredName(), - DataFrameIndexerTransformStats.INDEX_TIME_IN_MS.getPreferredName(), - DataFrameIndexerTransformStats.SEARCH_TIME_IN_MS.getPreferredName(), - DataFrameIndexerTransformStats.INDEX_TOTAL.getPreferredName(), - DataFrameIndexerTransformStats.SEARCH_TOTAL.getPreferredName(), - DataFrameIndexerTransformStats.INDEX_FAILURES.getPreferredName(), - DataFrameIndexerTransformStats.SEARCH_FAILURES.getPreferredName(), + TransformIndexerStats.NUM_PAGES.getPreferredName(), + TransformIndexerStats.NUM_INPUT_DOCUMENTS.getPreferredName(), + TransformIndexerStats.NUM_OUTPUT_DOCUMENTS.getPreferredName(), + TransformIndexerStats.NUM_INVOCATIONS.getPreferredName(), + TransformIndexerStats.INDEX_TIME_IN_MS.getPreferredName(), + TransformIndexerStats.SEARCH_TIME_IN_MS.getPreferredName(), + TransformIndexerStats.INDEX_TOTAL.getPreferredName(), + TransformIndexerStats.SEARCH_TOTAL.getPreferredName(), + TransformIndexerStats.INDEX_FAILURES.getPreferredName(), + TransformIndexerStats.SEARCH_FAILURES.getPreferredName(), }; @Inject - public DataFrameFeatureSet(Settings settings, ClusterService clusterService, Client client, @Nullable XPackLicenseState licenseState) { - this.enabled = XPackSettings.DATA_FRAME_ENABLED.get(settings); + public TransformFeatureSet(Settings settings, ClusterService clusterService, Client client, @Nullable XPackLicenseState licenseState) { + this.enabled = XPackSettings.TRANSFORM_ENABLED.get(settings); this.client = Objects.requireNonNull(client); this.clusterService = Objects.requireNonNull(clusterService); this.licenseState = licenseState; @@ -81,12 +81,12 @@ public class DataFrameFeatureSet implements XPackFeatureSet { @Override public String name() { - return XPackField.DATA_FRAME; + return XPackField.TRANSFORM; } @Override public boolean available() { - return licenseState != null && licenseState.isDataFrameAllowed(); + return licenseState != null && licenseState.isTransformAllowed(); } @Override @@ -102,26 +102,26 @@ public class DataFrameFeatureSet implements XPackFeatureSet { @Override public void usage(ActionListener listener) { if (enabled == false) { - listener.onResponse(new DataFrameFeatureSetUsage(available(), + listener.onResponse(new TransformFeatureSetUsage(available(), enabled(), Collections.emptyMap(), - new DataFrameIndexerTransformStats())); + new TransformIndexerStats())); return; } PersistentTasksCustomMetaData taskMetadata = PersistentTasksCustomMetaData.getPersistentTasksCustomMetaData(clusterService.state()); - Collection> dataFrameTasks = taskMetadata == null ? + Collection> transformTasks = taskMetadata == null ? 
Collections.emptyList() : - taskMetadata.findTasks(DataFrameTransform.NAME, (t) -> true); - final int taskCount = dataFrameTasks.size(); + taskMetadata.findTasks(TransformTaskParams.NAME, (t) -> true); + final int taskCount = transformTasks.size(); final Map transformsCountByState = new HashMap<>(); - for(PersistentTasksCustomMetaData.PersistentTask dataFrameTask : dataFrameTasks) { - DataFrameTransformState state = (DataFrameTransformState)dataFrameTask.getState(); + for(PersistentTasksCustomMetaData.PersistentTask transformTask : transformTasks) { + TransformState state = (TransformState)transformTask.getState(); transformsCountByState.merge(state.getTaskState().value(), 1L, Long::sum); } - ActionListener totalStatsListener = ActionListener.wrap( - statSummations -> listener.onResponse(new DataFrameFeatureSetUsage(available(), + ActionListener totalStatsListener = ActionListener.wrap( + statSummations -> listener.onResponse(new TransformFeatureSetUsage(available(), enabled(), transformsCountByState, statSummations)), @@ -136,13 +136,13 @@ public class DataFrameFeatureSet implements XPackFeatureSet { } long totalTransforms = transformCountSuccess.getHits().getTotalHits().value; if (totalTransforms == 0) { - listener.onResponse(new DataFrameFeatureSetUsage(available(), + listener.onResponse(new TransformFeatureSetUsage(available(), enabled(), transformsCountByState, - new DataFrameIndexerTransformStats())); + new TransformIndexerStats())); return; } - transformsCountByState.merge(DataFrameTransformTaskState.STOPPED.value(), totalTransforms - taskCount, Long::sum); + transformsCountByState.merge(TransformTaskState.STOPPED.value(), totalTransforms - taskCount, Long::sum); getStatisticSummations(client, totalStatsListener); }, transformCountFailure -> { @@ -154,20 +154,20 @@ public class DataFrameFeatureSet implements XPackFeatureSet { } ); - SearchRequest totalTransformCount = client.prepareSearch(DataFrameInternalIndex.INDEX_NAME_PATTERN) + SearchRequest totalTransformCount = client.prepareSearch(TransformInternalIndex.INDEX_NAME_PATTERN) .setTrackTotalHits(true) .setQuery(QueryBuilders.constantScoreQuery(QueryBuilders.boolQuery() - .filter(QueryBuilders.termQuery(DataFrameField.INDEX_DOC_TYPE.getPreferredName(), DataFrameTransformConfig.NAME)))) + .filter(QueryBuilders.termQuery(TransformField.INDEX_DOC_TYPE.getPreferredName(), TransformConfig.NAME)))) .request(); ClientHelper.executeAsyncWithOrigin(client.threadPool().getThreadContext(), - ClientHelper.DATA_FRAME_ORIGIN, + ClientHelper.TRANSFORM_ORIGIN, totalTransformCount, totalTransformCountListener, client::search); } - static DataFrameIndexerTransformStats parseSearchAggs(SearchResponse searchResponse) { + static TransformIndexerStats parseSearchAggs(SearchResponse searchResponse) { List statisticsList = new ArrayList<>(PROVIDED_STATS.length); for(String statName : PROVIDED_STATS) { @@ -179,7 +179,7 @@ public class DataFrameFeatureSet implements XPackFeatureSet { statisticsList.add(0L); } } - return new DataFrameIndexerTransformStats(statisticsList.get(0), // numPages + return new TransformIndexerStats(statisticsList.get(0), // numPages statisticsList.get(1), // numInputDocuments statisticsList.get(2), // numOutputDocuments statisticsList.get(3), // numInvocations @@ -191,16 +191,16 @@ public class DataFrameFeatureSet implements XPackFeatureSet { statisticsList.get(9)); // searchFailures } - static void getStatisticSummations(Client client, ActionListener statsListener) { + static void getStatisticSummations(Client client, 
ActionListener statsListener) { QueryBuilder queryBuilder = QueryBuilders.constantScoreQuery(QueryBuilders.boolQuery() - .filter(QueryBuilders.termQuery(DataFrameField.INDEX_DOC_TYPE.getPreferredName(), - DataFrameTransformStoredDoc.NAME))); + .filter(QueryBuilders.termQuery(TransformField.INDEX_DOC_TYPE.getPreferredName(), + TransformStoredDoc.NAME))); - SearchRequestBuilder requestBuilder = client.prepareSearch(DataFrameInternalIndex.INDEX_NAME_PATTERN) + SearchRequestBuilder requestBuilder = client.prepareSearch(TransformInternalIndex.INDEX_NAME_PATTERN) .setSize(0) .setQuery(queryBuilder); - final String path = DataFrameField.STATS_FIELD.getPreferredName() + "."; + final String path = TransformField.STATS_FIELD.getPreferredName() + "."; for(String statName : PROVIDED_STATS) { requestBuilder.addAggregation(AggregationBuilders.sum(statName).field(path + statName)); } @@ -216,14 +216,14 @@ public class DataFrameFeatureSet implements XPackFeatureSet { }, failure -> { if (failure instanceof ResourceNotFoundException) { - statsListener.onResponse(new DataFrameIndexerTransformStats()); + statsListener.onResponse(new TransformIndexerStats()); } else { statsListener.onFailure(failure); } } ); ClientHelper.executeAsyncWithOrigin(client.threadPool().getThreadContext(), - ClientHelper.DATA_FRAME_ORIGIN, + ClientHelper.TRANSFORM_ORIGIN, requestBuilder.request(), getStatisticSummationsListener, client::search); diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/DataFrameNodes.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransformNodes.java similarity index 65% rename from x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/DataFrameNodes.java rename to x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransformNodes.java index 1b2c54b331f..b05065f3a34 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/DataFrameNodes.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransformNodes.java @@ -4,30 +4,30 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.dataframe.action; +package org.elasticsearch.xpack.transform.action; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; -import org.elasticsearch.xpack.core.dataframe.DataFrameField; +import org.elasticsearch.xpack.core.transform.TransformField; import java.util.Collection; import java.util.HashSet; import java.util.List; import java.util.Set; -public final class DataFrameNodes { +public final class TransformNodes { - private DataFrameNodes() { + private TransformNodes() { } /** - * Get the list of nodes the data frames are executing on + * Get the list of nodes transforms are executing on * - * @param dataFrameIds The data frames. + * @param transformIds The transforms. 
* @param clusterState State * @return The executor nodes */ - public static String[] dataFrameTaskNodes(List dataFrameIds, ClusterState clusterState) { + public static String[] transformTaskNodes(List transformIds, ClusterState clusterState) { Set executorNodes = new HashSet<>(); @@ -35,10 +35,10 @@ public final class DataFrameNodes { PersistentTasksCustomMetaData.getPersistentTasksCustomMetaData(clusterState); if (tasksMetaData != null) { - Set dataFrameIdsSet = new HashSet<>(dataFrameIds); + Set transformIdsSet = new HashSet<>(transformIds); Collection> tasks = - tasksMetaData.findTasks(DataFrameField.TASK_NAME, t -> dataFrameIdsSet.contains(t.getId())); + tasksMetaData.findTasks(TransformField.TASK_NAME, t -> transformIdsSet.contains(t.getId())); for (PersistentTasksCustomMetaData.PersistentTask task : tasks) { executorNodes.add(task.getExecutorNode()); diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportDeleteDataFrameTransformAction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportDeleteTransformAction.java similarity index 72% rename from x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportDeleteDataFrameTransformAction.java rename to x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportDeleteTransformAction.java index fea44473622..5abd56dadb6 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportDeleteDataFrameTransformAction.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportDeleteTransformAction.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.dataframe.action; +package org.elasticsearch.xpack.transform.action; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.ActionListener; @@ -22,29 +22,29 @@ import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import org.elasticsearch.xpack.core.dataframe.action.DeleteDataFrameTransformAction; -import org.elasticsearch.xpack.core.dataframe.action.DeleteDataFrameTransformAction.Request; -import org.elasticsearch.xpack.core.dataframe.action.StopDataFrameTransformAction; -import org.elasticsearch.xpack.dataframe.notifications.DataFrameAuditor; -import org.elasticsearch.xpack.dataframe.persistence.DataFrameTransformsConfigManager; +import org.elasticsearch.xpack.core.transform.action.DeleteTransformAction; +import org.elasticsearch.xpack.core.transform.action.StopTransformAction; +import org.elasticsearch.xpack.core.transform.action.DeleteTransformAction.Request; +import org.elasticsearch.xpack.transform.notifications.TransformAuditor; +import org.elasticsearch.xpack.transform.persistence.TransformConfigManager; import java.io.IOException; -import static org.elasticsearch.xpack.core.ClientHelper.DATA_FRAME_ORIGIN; +import static org.elasticsearch.xpack.core.ClientHelper.TRANSFORM_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; -public class TransportDeleteDataFrameTransformAction extends TransportMasterNodeAction { +public class TransportDeleteTransformAction extends TransportMasterNodeAction { - private final DataFrameTransformsConfigManager transformsConfigManager; - private final DataFrameAuditor auditor; + private final TransformConfigManager transformsConfigManager; + private final TransformAuditor auditor; private final Client client; @Inject - public TransportDeleteDataFrameTransformAction(TransportService transportService, ActionFilters actionFilters, ThreadPool threadPool, + public TransportDeleteTransformAction(TransportService transportService, ActionFilters actionFilters, ThreadPool threadPool, ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver, - DataFrameTransformsConfigManager transformsConfigManager, DataFrameAuditor auditor, + TransformConfigManager transformsConfigManager, TransformAuditor auditor, Client client) { - super(DeleteDataFrameTransformAction.NAME, transportService, clusterService, threadPool, actionFilters, + super(DeleteTransformAction.NAME, transportService, clusterService, threadPool, actionFilters, Request::new, indexNameExpressionResolver); this.transformsConfigManager = transformsConfigManager; this.auditor = auditor; @@ -66,14 +66,14 @@ public class TransportDeleteDataFrameTransformAction extends TransportMasterNode ActionListener listener) { final PersistentTasksCustomMetaData pTasksMeta = state.getMetaData().custom(PersistentTasksCustomMetaData.TYPE); if (pTasksMeta != null && pTasksMeta.getTask(request.getId()) != null && request.isForce() == false) { - listener.onFailure(new ElasticsearchStatusException("Cannot delete data frame [" + request.getId() + + listener.onFailure(new ElasticsearchStatusException("Cannot delete transform [" + request.getId() + "] as the task is running. 
Stop the task first", RestStatus.CONFLICT)); } else { ActionListener stopTransformActionListener = ActionListener.wrap( stopResponse -> transformsConfigManager.deleteTransform(request.getId(), ActionListener.wrap( r -> { - auditor.info(request.getId(), "Deleted data frame transform."); + auditor.info(request.getId(), "Deleted transform."); listener.onResponse(new AcknowledgedResponse(r)); }, listener::onFailure)), @@ -82,9 +82,9 @@ public class TransportDeleteDataFrameTransformAction extends TransportMasterNode if (pTasksMeta != null && pTasksMeta.getTask(request.getId()) != null) { executeAsyncWithOrigin(client, - DATA_FRAME_ORIGIN, - StopDataFrameTransformAction.INSTANCE, - new StopDataFrameTransformAction.Request(request.getId(), true, true, null, true), + TRANSFORM_ORIGIN, + StopTransformAction.INSTANCE, + new StopTransformAction.Request(request.getId(), true, true, null, true), ActionListener.wrap( r -> stopTransformActionListener.onResponse(null), stopTransformActionListener::onFailure)); diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportGetDataFrameTransformsAction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportGetTransformsAction.java similarity index 61% rename from x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportGetDataFrameTransformsAction.java rename to x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportGetTransformsAction.java index 8fc03681b24..9ffe5941ee5 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportGetDataFrameTransformsAction.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportGetTransformsAction.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.dataframe.action; +package org.elasticsearch.xpack.transform.action; import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; @@ -22,25 +22,25 @@ import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.action.AbstractTransportGetResourcesAction; -import org.elasticsearch.xpack.core.dataframe.DataFrameField; -import org.elasticsearch.xpack.core.dataframe.DataFrameMessages; -import org.elasticsearch.xpack.core.dataframe.action.GetDataFrameTransformsAction; -import org.elasticsearch.xpack.core.dataframe.action.GetDataFrameTransformsAction.Request; -import org.elasticsearch.xpack.core.dataframe.action.GetDataFrameTransformsAction.Response; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; -import org.elasticsearch.xpack.dataframe.persistence.DataFrameInternalIndex; +import org.elasticsearch.xpack.core.transform.TransformField; +import org.elasticsearch.xpack.core.transform.TransformMessages; +import org.elasticsearch.xpack.core.transform.action.GetTransformsAction; +import org.elasticsearch.xpack.core.transform.action.GetTransformsAction.Request; +import org.elasticsearch.xpack.core.transform.action.GetTransformsAction.Response; +import org.elasticsearch.xpack.core.transform.transforms.TransformConfig; +import org.elasticsearch.xpack.transform.persistence.TransformInternalIndex; -import static org.elasticsearch.xpack.core.dataframe.DataFrameField.INDEX_DOC_TYPE; +import static org.elasticsearch.xpack.core.transform.TransformField.INDEX_DOC_TYPE; -public class TransportGetDataFrameTransformsAction extends AbstractTransportGetResourcesAction { @Inject - public TransportGetDataFrameTransformsAction(TransportService transportService, ActionFilters actionFilters, + public TransportGetTransformsAction(TransportService transportService, ActionFilters actionFilters, Client client, NamedXContentRegistry xContentRegistry) { - super(GetDataFrameTransformsAction.NAME, transportService, actionFilters, Request::new, client, xContentRegistry); + super(GetTransformsAction.NAME, transportService, actionFilters, Request::new, client, xContentRegistry); } @Override @@ -53,38 +53,38 @@ public class TransportGetDataFrameTransformsAction extends AbstractTransportGetR @Override protected ParseField getResultsField() { - return DataFrameField.TRANSFORMS; + return TransformField.TRANSFORMS; } @Override protected String[] getIndices() { - return new String[]{DataFrameInternalIndex.INDEX_NAME_PATTERN}; + return new String[]{TransformInternalIndex.INDEX_NAME_PATTERN}; } @Override - protected DataFrameTransformConfig parse(XContentParser parser) { - return DataFrameTransformConfig.fromXContent(parser, null, true); + protected TransformConfig parse(XContentParser parser) { + return TransformConfig.fromXContent(parser, null, true); } @Override protected ResourceNotFoundException notFoundException(String resourceId) { return new ResourceNotFoundException( - DataFrameMessages.getMessage(DataFrameMessages.REST_DATA_FRAME_UNKNOWN_TRANSFORM, resourceId)); + TransformMessages.getMessage(TransformMessages.REST_UNKNOWN_TRANSFORM, resourceId)); } @Override protected String executionOrigin() { - return ClientHelper.DATA_FRAME_ORIGIN; + return ClientHelper.TRANSFORM_ORIGIN; } @Override - protected String extractIdFromResource(DataFrameTransformConfig transformConfig) { + protected String 
extractIdFromResource(TransformConfig transformConfig) { return transformConfig.getId(); } @Override protected QueryBuilder additionalQuery() { - return QueryBuilders.termQuery(INDEX_DOC_TYPE.getPreferredName(), DataFrameTransformConfig.NAME); + return QueryBuilders.termQuery(INDEX_DOC_TYPE.getPreferredName(), TransformConfig.NAME); } @Override diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportGetDataFrameTransformsStatsAction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportGetTransformsStatsAction.java similarity index 68% rename from x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportGetDataFrameTransformsStatsAction.java rename to x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportGetTransformsStatsAction.java index c3b6bd39564..257aa6b9fdf 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportGetDataFrameTransformsStatsAction.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportGetTransformsStatsAction.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.dataframe.action; +package org.elasticsearch.xpack.transform.action; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -23,17 +23,17 @@ import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import org.elasticsearch.xpack.core.dataframe.action.GetDataFrameTransformsStatsAction; -import org.elasticsearch.xpack.core.dataframe.action.GetDataFrameTransformsStatsAction.Request; -import org.elasticsearch.xpack.core.dataframe.action.GetDataFrameTransformsStatsAction.Response; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpointingInfo; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformState; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformStats; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformStoredDoc; -import org.elasticsearch.xpack.core.dataframe.transforms.NodeAttributes; -import org.elasticsearch.xpack.dataframe.checkpoint.DataFrameTransformsCheckpointService; -import org.elasticsearch.xpack.dataframe.persistence.DataFrameTransformsConfigManager; -import org.elasticsearch.xpack.dataframe.transforms.DataFrameTransformTask; +import org.elasticsearch.xpack.core.transform.action.GetTransformsStatsAction; +import org.elasticsearch.xpack.core.transform.action.GetTransformsStatsAction.Request; +import org.elasticsearch.xpack.core.transform.action.GetTransformsStatsAction.Response; +import org.elasticsearch.xpack.core.transform.transforms.TransformCheckpointingInfo; +import org.elasticsearch.xpack.core.transform.transforms.TransformState; +import org.elasticsearch.xpack.core.transform.transforms.TransformStats; +import org.elasticsearch.xpack.core.transform.transforms.TransformStoredDoc; +import org.elasticsearch.xpack.core.transform.transforms.NodeAttributes; +import org.elasticsearch.xpack.transform.checkpoint.TransformCheckpointService; +import org.elasticsearch.xpack.transform.persistence.TransformConfigManager; +import org.elasticsearch.xpack.transform.transforms.TransformTask; import 
java.util.ArrayList; import java.util.Collections; @@ -45,34 +45,34 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; -public class TransportGetDataFrameTransformsStatsAction extends - TransportTasksAction { +public class TransportGetTransformsStatsAction extends + TransportTasksAction { - private static final Logger logger = LogManager.getLogger(TransportGetDataFrameTransformsStatsAction.class); + private static final Logger logger = LogManager.getLogger(TransportGetTransformsStatsAction.class); - private final DataFrameTransformsConfigManager dataFrameTransformsConfigManager; - private final DataFrameTransformsCheckpointService transformsCheckpointService; + private final TransformConfigManager transformConfigManager; + private final TransformCheckpointService transformCheckpointService; @Inject - public TransportGetDataFrameTransformsStatsAction(TransportService transportService, ActionFilters actionFilters, - ClusterService clusterService, - DataFrameTransformsConfigManager dataFrameTransformsConfigManager, - DataFrameTransformsCheckpointService transformsCheckpointService) { - super(GetDataFrameTransformsStatsAction.NAME, clusterService, transportService, actionFilters, Request::new, Response::new, + public TransportGetTransformsStatsAction(TransportService transportService, ActionFilters actionFilters, + ClusterService clusterService, + TransformConfigManager transformsConfigManager, + TransformCheckpointService transformsCheckpointService) { + super(GetTransformsStatsAction.NAME, clusterService, transportService, actionFilters, Request::new, Response::new, Response::new, ThreadPool.Names.SAME); - this.dataFrameTransformsConfigManager = dataFrameTransformsConfigManager; - this.transformsCheckpointService = transformsCheckpointService; + this.transformConfigManager = transformsConfigManager; + this.transformCheckpointService = transformsCheckpointService; } @Override protected Response newResponse(Request request, List tasks, List taskOperationFailures, List failedNodeExceptions) { - List responses = tasks.stream() + List responses = tasks.stream() .flatMap(r -> r.getTransformsStats().stream()) - .sorted(Comparator.comparing(DataFrameTransformStats::getId)) + .sorted(Comparator.comparing(TransformStats::getId)) .collect(Collectors.toList()); List allFailedNodeExceptions = new ArrayList<>(failedNodeExceptions); allFailedNodeExceptions.addAll(tasks.stream().flatMap(r -> r.getNodeFailures().stream()).collect(Collectors.toList())); @@ -80,16 +80,16 @@ public class TransportGetDataFrameTransformsStatsAction extends } @Override - protected void taskOperation(Request request, DataFrameTransformTask task, ActionListener listener) { + protected void taskOperation(Request request, TransformTask task, ActionListener listener) { // Little extra insurance, make sure we only return transforms that aren't cancelled ClusterState state = clusterService.state(); String nodeId = state.nodes().getLocalNode().getId(); if (task.isCancelled() == false) { - DataFrameTransformState transformState = task.getState(); - task.getCheckpointingInfo(transformsCheckpointService, ActionListener.wrap( + TransformState transformState = task.getState(); + task.getCheckpointingInfo(transformCheckpointService, ActionListener.wrap( checkpointingInfo -> listener.onResponse(new Response( - Collections.singletonList(new DataFrameTransformStats(task.getTransformId(), - 
DataFrameTransformStats.State.fromComponents(transformState.getTaskState(), transformState.getIndexerState()), + Collections.singletonList(new TransformStats(task.getTransformId(), + TransformStats.State.fromComponents(transformState.getTaskState(), transformState.getIndexerState()), transformState.getReason(), null, task.getStats(), @@ -98,12 +98,12 @@ public class TransportGetDataFrameTransformsStatsAction extends e -> { logger.warn("Failed to retrieve checkpointing info for transform [" + task.getTransformId() + "]", e); listener.onResponse(new Response( - Collections.singletonList(new DataFrameTransformStats(task.getTransformId(), - DataFrameTransformStats.State.fromComponents(transformState.getTaskState(), transformState.getIndexerState()), + Collections.singletonList(new TransformStats(task.getTransformId(), + TransformStats.State.fromComponents(transformState.getTaskState(), transformState.getIndexerState()), transformState.getReason(), null, task.getStats(), - DataFrameTransformCheckpointingInfo.EMPTY)), + TransformCheckpointingInfo.EMPTY)), 1L, Collections.emptyList(), Collections.singletonList(new FailedNodeException(nodeId, "Failed to retrieve checkpointing info", e)))); @@ -116,13 +116,13 @@ public class TransportGetDataFrameTransformsStatsAction extends @Override protected void doExecute(Task task, Request request, ActionListener finalListener) { - dataFrameTransformsConfigManager.expandTransformIds(request.getId(), + transformConfigManager.expandTransformIds(request.getId(), request.getPageParams(), request.isAllowNoMatch(), ActionListener.wrap(hitsAndIds -> { request.setExpandedIds(hitsAndIds.v2()); final ClusterState state = clusterService.state(); - request.setNodes(DataFrameNodes.dataFrameTaskNodes(hitsAndIds.v2(), state)); + request.setNodes(TransformNodes.transformTaskNodes(hitsAndIds.v2(), state)); super.doExecute(task, request, ActionListener.wrap( response -> { PersistentTasksCustomMetaData tasksInProgress = state.getMetaData().custom(PersistentTasksCustomMetaData.TYPE); @@ -152,13 +152,13 @@ public class TransportGetDataFrameTransformsStatsAction extends )); } - private static void setNodeAttributes(DataFrameTransformStats dataFrameTransformStats, + private static void setNodeAttributes(TransformStats transformStats, PersistentTasksCustomMetaData persistentTasksCustomMetaData, ClusterState state) { PersistentTasksCustomMetaData.PersistentTask pTask = - persistentTasksCustomMetaData.getTask(dataFrameTransformStats.getId()); + persistentTasksCustomMetaData.getTask(transformStats.getId()); if (pTask != null) { - dataFrameTransformStats.setNode(NodeAttributes.fromDiscoveryNode(state.nodes().get(pTask.getExecutorNode()))); + transformStats.setNode(NodeAttributes.fromDiscoveryNode(state.nodes().get(pTask.getExecutorNode()))); } } @@ -172,7 +172,7 @@ public class TransportGetDataFrameTransformsStatsAction extends } Set transformsWithoutTasks = new HashSet<>(request.getExpandedIds()); - transformsWithoutTasks.removeAll(response.getTransformsStats().stream().map(DataFrameTransformStats::getId) + transformsWithoutTasks.removeAll(response.getTransformsStats().stream().map(TransformStats::getId) .collect(Collectors.toList())); // Small assurance that we are at least below the max. Terms search has a hard limit of 10k, we should at least be below that. 
@@ -181,22 +181,22 @@ public class TransportGetDataFrameTransformsStatsAction extends // If the persistent task does NOT exist, it is STOPPED // There is a potential race condition where the saved document does not actually have a STOPPED state // as the task is cancelled before we persist state. - ActionListener> searchStatsListener = ActionListener.wrap( + ActionListener> searchStatsListener = ActionListener.wrap( statsForTransformsWithoutTasks -> { - List allStateAndStats = response.getTransformsStats(); + List allStateAndStats = response.getTransformsStats(); addCheckpointingInfoForTransformsWithoutTasks(allStateAndStats, statsForTransformsWithoutTasks, ActionListener.wrap( aVoid -> { transformsWithoutTasks.removeAll(statsForTransformsWithoutTasks.stream() - .map(DataFrameTransformStoredDoc::getId).collect(Collectors.toSet())); + .map(TransformStoredDoc::getId).collect(Collectors.toSet())); // Transforms that have not been started and have no state or stats. transformsWithoutTasks.forEach( - transformId -> allStateAndStats.add(DataFrameTransformStats.initialStats(transformId))); + transformId -> allStateAndStats.add(TransformStats.initialStats(transformId))); // Any transform in collection could NOT have a task, so, even though the list is initially sorted // it can easily become arbitrarily ordered based on which transforms don't have a task or stats docs - allStateAndStats.sort(Comparator.comparing(DataFrameTransformStats::getId)); + allStateAndStats.sort(Comparator.comparing(TransformStats::getId)); listener.onResponse(new Response(allStateAndStats, allStateAndStats.size(), @@ -214,12 +214,12 @@ public class TransportGetDataFrameTransformsStatsAction extends } ); - dataFrameTransformsConfigManager.getTransformStoredDoc(transformsWithoutTasks, searchStatsListener); + transformConfigManager.getTransformStoredDoc(transformsWithoutTasks, searchStatsListener); } - private void populateSingleStoppedTransformStat(DataFrameTransformStoredDoc transform, - ActionListener listener) { - transformsCheckpointService.getCheckpointingInfo( + private void populateSingleStoppedTransformStat(TransformStoredDoc transform, + ActionListener listener) { + transformCheckpointService.getCheckpointingInfo( transform.getId(), transform.getTransformState().getCheckpoint(), transform.getTransformState().getPosition(), @@ -228,13 +228,13 @@ public class TransportGetDataFrameTransformsStatsAction extends listener::onResponse, e -> { logger.warn("Failed to retrieve checkpointing info for transform [" + transform.getId() + "]", e); - listener.onResponse(DataFrameTransformCheckpointingInfo.EMPTY); + listener.onResponse(TransformCheckpointingInfo.EMPTY); } )); } - private void addCheckpointingInfoForTransformsWithoutTasks(List allStateAndStats, - List statsForTransformsWithoutTasks, + private void addCheckpointingInfoForTransformsWithoutTasks(List allStateAndStats, + List statsForTransformsWithoutTasks, ActionListener listener) { if (statsForTransformsWithoutTasks.isEmpty()) { @@ -250,9 +250,9 @@ public class TransportGetDataFrameTransformsStatsAction extends ActionListener.wrap( checkpointingInfo -> { synchronized (allStateAndStats) { - allStateAndStats.add(new DataFrameTransformStats( + allStateAndStats.add(new TransformStats( stat.getId(), - DataFrameTransformStats.State.STOPPED, + TransformStats.State.STOPPED, null, null, stat.getTransformStats(), diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPreviewDataFrameTransformAction.java 
b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportPreviewTransformAction.java similarity index 80% rename from x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPreviewDataFrameTransformAction.java rename to x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportPreviewTransformAction.java index cc035c2c668..0712f322507 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPreviewDataFrameTransformAction.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportPreviewTransformAction.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.dataframe.action; +package org.elasticsearch.xpack.transform.action; import org.apache.log4j.LogManager; import org.apache.log4j.Logger; @@ -40,14 +40,14 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.XPackField; -import org.elasticsearch.xpack.core.dataframe.DataFrameField; -import org.elasticsearch.xpack.core.dataframe.DataFrameMessages; -import org.elasticsearch.xpack.core.dataframe.action.PreviewDataFrameTransformAction; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerTransformStats; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; -import org.elasticsearch.xpack.core.dataframe.transforms.SourceConfig; -import org.elasticsearch.xpack.dataframe.transforms.pivot.AggregationResultUtils; -import org.elasticsearch.xpack.dataframe.transforms.pivot.Pivot; +import org.elasticsearch.xpack.core.transform.TransformField; +import org.elasticsearch.xpack.core.transform.TransformMessages; +import org.elasticsearch.xpack.core.transform.action.PreviewTransformAction; +import org.elasticsearch.xpack.core.transform.transforms.TransformIndexerStats; +import org.elasticsearch.xpack.core.transform.transforms.TransformConfig; +import org.elasticsearch.xpack.core.transform.transforms.SourceConfig; +import org.elasticsearch.xpack.transform.transforms.pivot.AggregationResultUtils; +import org.elasticsearch.xpack.transform.transforms.pivot.Pivot; import java.util.ArrayList; import java.util.HashMap; @@ -56,12 +56,12 @@ import java.util.Map; import java.util.stream.Collectors; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.elasticsearch.xpack.dataframe.transforms.DataFrameIndexer.COMPOSITE_AGGREGATION_NAME; +import static org.elasticsearch.xpack.transform.transforms.TransformIndexer.COMPOSITE_AGGREGATION_NAME; -public class TransportPreviewDataFrameTransformAction extends - HandledTransportAction { +public class TransportPreviewTransformAction extends + HandledTransportAction { - private static final Logger logger = LogManager.getLogger(TransportPreviewDataFrameTransformAction.class); + private static final Logger logger = LogManager.getLogger(TransportPreviewTransformAction.class); private static final int NUMBER_OF_PREVIEW_BUCKETS = 100; private final XPackLicenseState licenseState; private final Client client; @@ -70,11 +70,11 @@ public class TransportPreviewDataFrameTransformAction extends private final ClusterService clusterService; @Inject - public TransportPreviewDataFrameTransformAction(TransportService transportService, ActionFilters actionFilters, 
+ public TransportPreviewTransformAction(TransportService transportService, ActionFilters actionFilters, Client client, ThreadPool threadPool, XPackLicenseState licenseState, IndexNameExpressionResolver indexNameExpressionResolver, ClusterService clusterService) { - super(PreviewDataFrameTransformAction.NAME,transportService, actionFilters, PreviewDataFrameTransformAction.Request::new); + super(PreviewTransformAction.NAME,transportService, actionFilters, PreviewTransformAction.Request::new); this.licenseState = licenseState; this.client = client; this.threadPool = threadPool; @@ -84,21 +84,21 @@ public class TransportPreviewDataFrameTransformAction extends @Override protected void doExecute(Task task, - PreviewDataFrameTransformAction.Request request, - ActionListener listener) { - if (!licenseState.isDataFrameAllowed()) { - listener.onFailure(LicenseUtils.newComplianceException(XPackField.DATA_FRAME)); + PreviewTransformAction.Request request, + ActionListener listener) { + if (!licenseState.isTransformAllowed()) { + listener.onFailure(LicenseUtils.newComplianceException(XPackField.TRANSFORM)); return; } ClusterState clusterState = clusterService.state(); - final DataFrameTransformConfig config = request.getConfig(); + final TransformConfig config = request.getConfig(); for(String src : config.getSource().getIndex()) { String[] concreteNames = indexNameExpressionResolver.concreteIndexNames(clusterState, IndicesOptions.lenientExpandOpen(), src); if (concreteNames.length == 0) { listener.onFailure(new ElasticsearchStatusException( - DataFrameMessages.getMessage(DataFrameMessages.REST_PUT_DATA_FRAME_SOURCE_INDEX_MISSING, src), + TransformMessages.getMessage(TransformMessages.REST_PUT_TRANSFORM_SOURCE_INDEX_MISSING, src), RestStatus.BAD_REQUEST)); return; } @@ -109,13 +109,13 @@ public class TransportPreviewDataFrameTransformAction extends pivot.validateConfig(); } catch (ElasticsearchStatusException e) { listener.onFailure( - new ElasticsearchStatusException(DataFrameMessages.REST_PUT_DATA_FRAME_FAILED_TO_VALIDATE_DATA_FRAME_CONFIGURATION, + new ElasticsearchStatusException(TransformMessages.REST_PUT_TRANSFORM_FAILED_TO_VALIDATE_CONFIGURATION, e.status(), e)); return; } catch (Exception e) { listener.onFailure(new ElasticsearchStatusException( - DataFrameMessages.REST_PUT_DATA_FRAME_FAILED_TO_VALIDATE_DATA_FRAME_CONFIGURATION, RestStatus.INTERNAL_SERVER_ERROR, e)); + TransformMessages.REST_PUT_TRANSFORM_FAILED_TO_VALIDATE_CONFIGURATION, RestStatus.INTERNAL_SERVER_ERROR, e)); return; } @@ -127,8 +127,8 @@ public class TransportPreviewDataFrameTransformAction extends SourceConfig source, String pipeline, String dest, - ActionListener listener) { - final PreviewDataFrameTransformAction.Response previewResponse = new PreviewDataFrameTransformAction.Response(); + ActionListener listener) { + final PreviewTransformAction.Response previewResponse = new PreviewTransformAction.Response(); ActionListener pipelineResponseActionListener = ActionListener.wrap( simulatePipelineResponse -> { List> response = new ArrayList<>(simulatePipelineResponse.getResults().size()); @@ -150,7 +150,7 @@ public class TransportPreviewDataFrameTransformAction extends deducedMappings -> { previewResponse.setMappingsFromStringMap(deducedMappings); ClientHelper.executeWithHeadersAsync(threadPool.getThreadContext().getHeaders(), - ClientHelper.DATA_FRAME_ORIGIN, + ClientHelper.TRANSFORM_ORIGIN, client, SearchAction.INSTANCE, pivot.buildSearchRequest(source, null, NUMBER_OF_PREVIEW_BUCKETS), @@ -166,7 +166,7 @@ public class 
TransportPreviewDataFrameTransformAction extends return; } final CompositeAggregation agg = aggregations.get(COMPOSITE_AGGREGATION_NAME); - DataFrameIndexerTransformStats stats = new DataFrameIndexerTransformStats(); + TransformIndexerStats stats = new TransformIndexerStats(); // remove all internal fields if (pipeline == null) { @@ -179,7 +179,7 @@ public class TransportPreviewDataFrameTransformAction extends List> results = pivot.extractResults(agg, deducedMappings, stats) .map(doc -> { Map src = new HashMap<>(); - String id = (String) doc.get(DataFrameField.DOCUMENT_ID_FIELD); + String id = (String) doc.get(TransformField.DOCUMENT_ID_FIELD); doc.keySet().removeIf(k -> k.startsWith("_")); src.put("_source", doc); src.put("_id", id); @@ -194,7 +194,7 @@ public class TransportPreviewDataFrameTransformAction extends new SimulatePipelineRequest(BytesReference.bytes(builder), XContentType.JSON); pipelineRequest.setId(pipeline); ClientHelper.executeAsyncWithOrigin(client, - ClientHelper.DATA_FRAME_ORIGIN, + ClientHelper.TRANSFORM_ORIGIN, SimulatePipelineAction.INSTANCE, pipelineRequest, pipelineResponseActionListener); diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPutDataFrameTransformAction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportPutTransformAction.java similarity index 74% rename from x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPutDataFrameTransformAction.java rename to x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportPutTransformAction.java index 66d4f486f71..bdc841528fe 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPutDataFrameTransformAction.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportPutTransformAction.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.dataframe.action; +package org.elasticsearch.xpack.transform.action; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.ResourceAlreadyExistsException; @@ -34,10 +34,6 @@ import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.XPackField; import org.elasticsearch.xpack.core.XPackPlugin; import org.elasticsearch.xpack.core.XPackSettings; -import org.elasticsearch.xpack.core.dataframe.DataFrameMessages; -import org.elasticsearch.xpack.core.dataframe.action.PutDataFrameTransformAction; -import org.elasticsearch.xpack.core.dataframe.action.PutDataFrameTransformAction.Request; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; import org.elasticsearch.xpack.core.security.SecurityContext; import org.elasticsearch.xpack.core.security.action.user.HasPrivilegesAction; import org.elasticsearch.xpack.core.security.action.user.HasPrivilegesRequest; @@ -45,10 +41,14 @@ import org.elasticsearch.xpack.core.security.action.user.HasPrivilegesResponse; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import org.elasticsearch.xpack.core.security.authz.permission.ResourcePrivileges; import org.elasticsearch.xpack.core.security.support.Exceptions; -import org.elasticsearch.xpack.dataframe.notifications.DataFrameAuditor; -import org.elasticsearch.xpack.dataframe.persistence.DataFrameTransformsConfigManager; -import org.elasticsearch.xpack.dataframe.transforms.SourceDestValidator; -import org.elasticsearch.xpack.dataframe.transforms.pivot.Pivot; +import org.elasticsearch.xpack.core.transform.TransformMessages; +import org.elasticsearch.xpack.core.transform.action.PutTransformAction; +import org.elasticsearch.xpack.core.transform.action.PutTransformAction.Request; +import org.elasticsearch.xpack.core.transform.transforms.TransformConfig; +import org.elasticsearch.xpack.transform.notifications.TransformAuditor; +import org.elasticsearch.xpack.transform.persistence.TransformConfigManager; +import org.elasticsearch.xpack.transform.transforms.SourceDestValidator; +import org.elasticsearch.xpack.transform.transforms.pivot.Pivot; import java.io.IOException; import java.time.Instant; @@ -57,31 +57,31 @@ import java.util.List; import java.util.Map; import java.util.stream.Collectors; -public class TransportPutDataFrameTransformAction extends TransportMasterNodeAction { +public class TransportPutTransformAction extends TransportMasterNodeAction { private final XPackLicenseState licenseState; private final Client client; - private final DataFrameTransformsConfigManager dataFrameTransformsConfigManager; + private final TransformConfigManager transformsConfigManager; private final SecurityContext securityContext; - private final DataFrameAuditor auditor; + private final TransformAuditor auditor; @Inject - public TransportPutDataFrameTransformAction(Settings settings, TransportService transportService, ThreadPool threadPool, - ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, - ClusterService clusterService, XPackLicenseState licenseState, - DataFrameTransformsConfigManager dataFrameTransformsConfigManager, Client client, - DataFrameAuditor auditor) { - super(PutDataFrameTransformAction.NAME, transportService, clusterService, threadPool, actionFilters, - PutDataFrameTransformAction.Request::new, indexNameExpressionResolver); + public TransportPutTransformAction(Settings settings, TransportService transportService, ThreadPool threadPool, + 
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, + ClusterService clusterService, XPackLicenseState licenseState, + TransformConfigManager transformsConfigManager, Client client, + TransformAuditor auditor) { + super(PutTransformAction.NAME, transportService, clusterService, threadPool, actionFilters, + PutTransformAction.Request::new, indexNameExpressionResolver); this.licenseState = licenseState; this.client = client; - this.dataFrameTransformsConfigManager = dataFrameTransformsConfigManager; + this.transformsConfigManager = transformsConfigManager; this.securityContext = XPackSettings.SECURITY_ENABLED.get(settings) ? new SecurityContext(settings, threadPool.getThreadContext()) : null; this.auditor = auditor; } - static HasPrivilegesRequest buildPrivilegeCheck(DataFrameTransformConfig config, + static HasPrivilegesRequest buildPrivilegeCheck(TransformConfig config, IndexNameExpressionResolver indexNameExpressionResolver, ClusterState clusterState, String username) { @@ -134,19 +134,19 @@ public class TransportPutDataFrameTransformAction extends TransportMasterNodeAct protected void masterOperation(Request request, ClusterState clusterState, ActionListener listener) throws Exception { - if (!licenseState.isDataFrameAllowed()) { - listener.onFailure(LicenseUtils.newComplianceException(XPackField.DATA_FRAME)); + if (!licenseState.isTransformAllowed()) { + listener.onFailure(LicenseUtils.newComplianceException(XPackField.TRANSFORM)); return; } XPackPlugin.checkReadyForXPackCustomMetadata(clusterState); - // set headers to run data frame transform as calling user + // set headers to run transform as calling user Map filteredHeaders = threadPool.getThreadContext().getHeaders().entrySet().stream() .filter(e -> ClientHelper.SECURITY_HEADER_FILTERS.contains(e.getKey())) .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); - DataFrameTransformConfig config = request.getConfig() + TransformConfig config = request.getConfig() .setHeaders(filteredHeaders) .setCreateTime(Instant.now()) .setVersion(Version.CURRENT); @@ -155,7 +155,7 @@ public class TransportPutDataFrameTransformAction extends TransportMasterNodeAct // quick check whether a transform has already been created under that name if (PersistentTasksCustomMetaData.getTaskWithId(clusterState, transformId) != null) { listener.onFailure(new ResourceAlreadyExistsException( - DataFrameMessages.getMessage(DataFrameMessages.REST_PUT_DATA_FRAME_TRANSFORM_EXISTS, transformId))); + TransformMessages.getMessage(TransformMessages.REST_PUT_TRANSFORM_EXISTS, transformId))); return; } try { @@ -175,12 +175,12 @@ public class TransportPutDataFrameTransformAction extends TransportMasterNodeAct client.execute(HasPrivilegesAction.INSTANCE, privRequest, privResponseListener); } else { // No security enabled, just create the transform - putDataFrame(request, listener); + putTransform(request, listener); } } @Override - protected ClusterBlockException checkBlock(PutDataFrameTransformAction.Request request, ClusterState state) { + protected ClusterBlockException checkBlock(PutTransformAction.Request request, ClusterState state) { return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); } @@ -189,7 +189,7 @@ public class TransportPutDataFrameTransformAction extends TransportMasterNodeAct HasPrivilegesResponse privilegesResponse, ActionListener listener) { if (privilegesResponse.isCompleteMatch()) { - putDataFrame(request, listener); + putTransform(request, listener); } else { List indices = 
privilegesResponse.getIndexPrivileges() .stream() @@ -197,22 +197,22 @@ public class TransportPutDataFrameTransformAction extends TransportMasterNodeAct .collect(Collectors.toList()); listener.onFailure(Exceptions.authorizationError( - "Cannot create data frame transform [{}] because user {} lacks all the required permissions for indices: {}", + "Cannot create transform [{}] because user {} lacks all the required permissions for indices: {}", request.getConfig().getId(), username, indices)); } } - private void putDataFrame(Request request, ActionListener listener) { + private void putTransform(Request request, ActionListener listener) { - final DataFrameTransformConfig config = request.getConfig(); + final TransformConfig config = request.getConfig(); final Pivot pivot = new Pivot(config.getPivotConfig()); // <3> Return to the listener ActionListener putTransformConfigurationListener = ActionListener.wrap( putTransformConfigurationResult -> { - auditor.info(config.getId(), "Created data frame transform."); + auditor.info(config.getId(), "Created transform."); listener.onResponse(new AcknowledgedResponse(true)); }, listener::onFailure @@ -220,16 +220,16 @@ public class TransportPutDataFrameTransformAction extends TransportMasterNodeAct // <2> Put our transform ActionListener pivotValidationListener = ActionListener.wrap( - validationResult -> dataFrameTransformsConfigManager.putTransformConfiguration(config, putTransformConfigurationListener), + validationResult -> transformsConfigManager.putTransformConfiguration(config, putTransformConfigurationListener), validationException -> { if (validationException instanceof ElasticsearchStatusException) { listener.onFailure(new ElasticsearchStatusException( - DataFrameMessages.REST_PUT_DATA_FRAME_FAILED_TO_VALIDATE_DATA_FRAME_CONFIGURATION, + TransformMessages.REST_PUT_TRANSFORM_FAILED_TO_VALIDATE_CONFIGURATION, ((ElasticsearchStatusException)validationException).status(), validationException)); } else { listener.onFailure(new ElasticsearchStatusException( - DataFrameMessages.REST_PUT_DATA_FRAME_FAILED_TO_VALIDATE_DATA_FRAME_CONFIGURATION, + TransformMessages.REST_PUT_TRANSFORM_FAILED_TO_VALIDATE_CONFIGURATION, RestStatus.INTERNAL_SERVER_ERROR, validationException)); } @@ -240,13 +240,13 @@ public class TransportPutDataFrameTransformAction extends TransportMasterNodeAct pivot.validateConfig(); } catch (ElasticsearchStatusException e) { listener.onFailure(new ElasticsearchStatusException( - DataFrameMessages.REST_PUT_DATA_FRAME_FAILED_TO_VALIDATE_DATA_FRAME_CONFIGURATION, + TransformMessages.REST_PUT_TRANSFORM_FAILED_TO_VALIDATE_CONFIGURATION, e.status(), e)); return; } catch (Exception e) { listener.onFailure(new ElasticsearchStatusException( - DataFrameMessages.REST_PUT_DATA_FRAME_FAILED_TO_VALIDATE_DATA_FRAME_CONFIGURATION, RestStatus.INTERNAL_SERVER_ERROR, e)); + TransformMessages.REST_PUT_TRANSFORM_FAILED_TO_VALIDATE_CONFIGURATION, RestStatus.INTERNAL_SERVER_ERROR, e)); return; } diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStartDataFrameTransformAction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportStartTransformAction.java similarity index 59% rename from x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStartDataFrameTransformAction.java rename to x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportStartTransformAction.java index 6aaf46965ea..ad1430eb26f 100644 --- 
a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStartDataFrameTransformAction.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportStartTransformAction.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.dataframe.action; +package org.elasticsearch.xpack.transform.action; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -34,18 +34,17 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.XPackField; -import org.elasticsearch.xpack.core.dataframe.DataFrameMessages; -import org.elasticsearch.xpack.core.dataframe.action.StartDataFrameTransformAction; -import org.elasticsearch.xpack.core.dataframe.action.StartDataFrameTransformTaskAction; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransform; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformState; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformTaskState; -import org.elasticsearch.xpack.dataframe.notifications.DataFrameAuditor; -import org.elasticsearch.xpack.dataframe.persistence.DataFrameTransformsConfigManager; -import org.elasticsearch.xpack.dataframe.persistence.DataframeIndex; -import org.elasticsearch.xpack.dataframe.transforms.SourceDestValidator; -import org.elasticsearch.xpack.dataframe.transforms.pivot.Pivot; +import org.elasticsearch.xpack.core.transform.TransformMessages; +import org.elasticsearch.xpack.core.transform.action.StartTransformAction; +import org.elasticsearch.xpack.core.transform.transforms.TransformTaskParams; +import org.elasticsearch.xpack.core.transform.transforms.TransformConfig; +import org.elasticsearch.xpack.core.transform.transforms.TransformState; +import org.elasticsearch.xpack.core.transform.transforms.TransformTaskState; +import org.elasticsearch.xpack.transform.notifications.TransformAuditor; +import org.elasticsearch.xpack.transform.persistence.TransformConfigManager; +import org.elasticsearch.xpack.transform.persistence.TransformIndex; +import org.elasticsearch.xpack.transform.transforms.SourceDestValidator; +import org.elasticsearch.xpack.transform.transforms.pivot.Pivot; import java.io.IOException; import java.time.Clock; @@ -55,27 +54,29 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; import java.util.function.Predicate; -public class TransportStartDataFrameTransformAction extends - TransportMasterNodeAction { +import static org.elasticsearch.xpack.core.transform.TransformMessages.CANNOT_START_FAILED_TRANSFORM; - private static final Logger logger = LogManager.getLogger(TransportStartDataFrameTransformAction.class); +public class TransportStartTransformAction extends + TransportMasterNodeAction { + + private static final Logger logger = LogManager.getLogger(TransportStartTransformAction.class); private final XPackLicenseState licenseState; - private final DataFrameTransformsConfigManager dataFrameTransformsConfigManager; + private final TransformConfigManager transformsConfigManager; private final PersistentTasksService persistentTasksService; private final Client client; - private final DataFrameAuditor auditor; + private final TransformAuditor auditor; @Inject 
- public TransportStartDataFrameTransformAction(TransportService transportService, ActionFilters actionFilters, - ClusterService clusterService, XPackLicenseState licenseState, - ThreadPool threadPool, IndexNameExpressionResolver indexNameExpressionResolver, - DataFrameTransformsConfigManager dataFrameTransformsConfigManager, - PersistentTasksService persistentTasksService, Client client, - DataFrameAuditor auditor) { - super(StartDataFrameTransformAction.NAME, transportService, clusterService, threadPool, actionFilters, - StartDataFrameTransformAction.Request::new, indexNameExpressionResolver); + public TransportStartTransformAction(TransportService transportService, ActionFilters actionFilters, + ClusterService clusterService, XPackLicenseState licenseState, + ThreadPool threadPool, IndexNameExpressionResolver indexNameExpressionResolver, + TransformConfigManager transformsConfigManager, + PersistentTasksService persistentTasksService, Client client, + TransformAuditor auditor) { + super(StartTransformAction.NAME, transportService, clusterService, threadPool, actionFilters, + StartTransformAction.Request::new, indexNameExpressionResolver); this.licenseState = licenseState; - this.dataFrameTransformsConfigManager = dataFrameTransformsConfigManager; + this.transformsConfigManager = transformsConfigManager; this.persistentTasksService = persistentTasksService; this.client = client; this.auditor = auditor; @@ -87,31 +88,31 @@ public class TransportStartDataFrameTransformAction extends } @Override - protected StartDataFrameTransformAction.Response read(StreamInput in) throws IOException { - return new StartDataFrameTransformAction.Response(in); + protected StartTransformAction.Response read(StreamInput in) throws IOException { + return new StartTransformAction.Response(in); } @Override - protected void masterOperation(StartDataFrameTransformAction.Request request, + protected void masterOperation(StartTransformAction.Request request, ClusterState state, - ActionListener listener) throws Exception { - if (!licenseState.isDataFrameAllowed()) { - listener.onFailure(LicenseUtils.newComplianceException(XPackField.DATA_FRAME)); + ActionListener listener) throws Exception { + if (!licenseState.isTransformAllowed()) { + listener.onFailure(LicenseUtils.newComplianceException(XPackField.TRANSFORM)); return; } - final AtomicReference transformTaskHolder = new AtomicReference<>(); + final AtomicReference transformTaskHolder = new AtomicReference<>(); // <4> Wait for the allocated task's state to STARTED - ActionListener> newPersistentTaskActionListener = + ActionListener> newPersistentTaskActionListener = ActionListener.wrap( task -> { - DataFrameTransform transformTask = transformTaskHolder.get(); + TransformTaskParams transformTask = transformTaskHolder.get(); assert transformTask != null; - waitForDataFrameTaskStarted(task.getId(), + waitForTransformTaskStarted(task.getId(), transformTask, request.timeout(), ActionListener.wrap( - taskStarted -> listener.onResponse(new StartDataFrameTransformAction.Response(true)), + taskStarted -> listener.onResponse(new StartTransformAction.Response(true)), listener::onFailure)); }, listener::onFailure @@ -120,50 +121,32 @@ public class TransportStartDataFrameTransformAction extends // <3> Create the task in cluster state so that it will start executing on the node ActionListener createOrGetIndexListener = ActionListener.wrap( unused -> { - DataFrameTransform transformTask = transformTaskHolder.get(); + TransformTaskParams transformTask = transformTaskHolder.get(); 
assert transformTask != null; - PersistentTasksCustomMetaData.PersistentTask existingTask = + PersistentTasksCustomMetaData.PersistentTask existingTask = getExistingTask(transformTask.getId(), state); if (existingTask == null) { // Create the allocated task and wait for it to be started persistentTasksService.sendStartRequest(transformTask.getId(), - DataFrameTransform.NAME, + TransformTaskParams.NAME, transformTask, newPersistentTaskActionListener); } else { - DataFrameTransformState transformState = (DataFrameTransformState)existingTask.getState(); - if(transformState.getTaskState() == DataFrameTransformTaskState.FAILED && request.isForce() == false) { + TransformState transformState = (TransformState)existingTask.getState(); + if(transformState.getTaskState() == TransformTaskState.FAILED) { listener.onFailure(new ElasticsearchStatusException( - "Unable to start data frame transform [" + request.getId() + - "] as it is in a failed state with failure: [" + transformState.getReason() + - "]. Use force start to restart data frame transform once error is resolved.", + TransformMessages.getMessage(CANNOT_START_FAILED_TRANSFORM, + request.getId(), + transformState.getReason()), RestStatus.CONFLICT)); - } else if (transformState.getTaskState() != DataFrameTransformTaskState.STOPPED && - transformState.getTaskState() != DataFrameTransformTaskState.FAILED) { - listener.onFailure(new ElasticsearchStatusException( - "Unable to start data frame transform [" + request.getId() + - "] as it is in state [" + transformState.getTaskState() + "]", RestStatus.CONFLICT)); } else { - // If the task already exists but is not assigned to a node, something is weird - // return a failure that includes the current assignment explanation (if one exists) - if (existingTask.isAssigned() == false) { - String assignmentExplanation = "unknown reason"; - if (existingTask.getAssignment() != null) { - assignmentExplanation = existingTask.getAssignment().getExplanation(); - } - listener.onFailure(new ElasticsearchStatusException("Unable to start data frame transform [" + - request.getId() + "] as it is not assigned to a node, explanation: " + assignmentExplanation, - RestStatus.CONFLICT)); - return; - } - // If the task already exists and is assigned to a node, simply attempt to set it to start - ClientHelper.executeAsyncWithOrigin(client, - ClientHelper.DATA_FRAME_ORIGIN, - StartDataFrameTransformTaskAction.INSTANCE, - new StartDataFrameTransformTaskAction.Request(request.getId(), request.isForce()), - ActionListener.wrap( - r -> listener.onResponse(new StartDataFrameTransformAction.Response(true)), - listener::onFailure)); + // If the task already exists that means that it is either running or failed + // Since it is not failed, that means it is running, we return a conflict. 
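The start flow in TransportStartTransformAction is composed of ActionListeners declared in reverse order of execution (<4> wait for STARTED, <3> create the persistent task, <2> ensure the destination index, <1> load the config). A minimal, self-contained analog of that chaining style is sketched below; it is not part of the patch, and the class, method names, and placeholder steps are illustrative only, standing in for the real config manager and persistent-task service.

    import org.elasticsearch.action.ActionListener;

    // Simplified analog of the listener chain used in TransportStartTransformAction:
    // each step is declared before the step that triggers it, so the chain reads bottom-up.
    public final class ListenerChainSketch {

        public static void start(String transformId, ActionListener<Boolean> finalListener) {
            // <3> report success to the caller once the "task" has been started
            ActionListener<String> taskStartedListener = ActionListener.wrap(
                startedTaskId -> finalListener.onResponse(true),
                finalListener::onFailure);

            // <2> stand-in for "create the destination index, then ask the persistent-task
            //     service to start the task" (sendStartRequest in the real action)
            ActionListener<Void> indexReadyListener = ActionListener.wrap(
                ignored -> taskStartedListener.onResponse(transformId),
                finalListener::onFailure);

            // <1> stand-in for "load and validate the transform config"
            indexReadyListener.onResponse(null);
        }
    }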
+ listener.onFailure(new ElasticsearchStatusException( + "Cannot start transform [{}] as it is already started.", + RestStatus.CONFLICT, + request.getId() + )); } } }, @@ -171,11 +154,11 @@ public class TransportStartDataFrameTransformAction extends ); // <2> If the destination index exists, start the task, otherwise deduce our mappings for the destination index and create it - ActionListener getTransformListener = ActionListener.wrap( + ActionListener getTransformListener = ActionListener.wrap( config -> { if (config.isValid() == false) { listener.onFailure(new ElasticsearchStatusException( - DataFrameMessages.getMessage(DataFrameMessages.DATA_FRAME_CONFIG_INVALID, request.getId()), + TransformMessages.getMessage(TransformMessages.TRANSFORM_CONFIG_INVALID, request.getId()), RestStatus.BAD_REQUEST )); return; @@ -183,7 +166,7 @@ public class TransportStartDataFrameTransformAction extends // Validate source and destination indices SourceDestValidator.validate(config, clusterService.state(), indexNameExpressionResolver, false); - transformTaskHolder.set(createDataFrameTransform(config.getId(), config.getVersion(), config.getFrequency())); + transformTaskHolder.set(createTransform(config.getId(), config.getVersion(), config.getFrequency())); final String destinationIndex = config.getDestination().getIndex(); String[] dest = indexNameExpressionResolver.concreteIndexNames(state, IndicesOptions.lenientExpandOpen(), @@ -196,7 +179,7 @@ public class TransportStartDataFrameTransformAction extends } else { auditor.info(request.getId(), "Using existing destination index [" + destinationIndex + "]."); ClientHelper.executeAsyncWithOrigin(client.threadPool().getThreadContext(), - ClientHelper.DATA_FRAME_ORIGIN, + ClientHelper.TRANSFORM_ORIGIN, client.admin() .indices() .prepareStats(dest) @@ -225,22 +208,22 @@ public class TransportStartDataFrameTransformAction extends ); // <1> Get the config to verify it exists and is valid - dataFrameTransformsConfigManager.getTransformConfiguration(request.getId(), getTransformListener); + transformsConfigManager.getTransformConfiguration(request.getId(), getTransformListener); } - private void createDestinationIndex(final DataFrameTransformConfig config, final ActionListener listener) { + private void createDestinationIndex(final TransformConfig config, final ActionListener listener) { final Pivot pivot = new Pivot(config.getPivotConfig()); ActionListener> deduceMappingsListener = ActionListener.wrap( - mappings -> DataframeIndex.createDestinationIndex( + mappings -> TransformIndex.createDestinationIndex( client, Clock.systemUTC(), config, mappings, ActionListener.wrap(r -> listener.onResponse(null), listener::onFailure)), deduceTargetMappingsException -> listener.onFailure( - new RuntimeException(DataFrameMessages.REST_PUT_DATA_FRAME_FAILED_TO_DEDUCE_DEST_MAPPINGS, + new RuntimeException(TransformMessages.REST_PUT_TRANSFORM_FAILED_TO_DEDUCE_DEST_MAPPINGS, deduceTargetMappingsException)) ); @@ -248,48 +231,48 @@ public class TransportStartDataFrameTransformAction extends } @Override - protected ClusterBlockException checkBlock(StartDataFrameTransformAction.Request request, ClusterState state) { + protected ClusterBlockException checkBlock(StartTransformAction.Request request, ClusterState state) { return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); } - private static DataFrameTransform createDataFrameTransform(String transformId, Version transformVersion, TimeValue frequency) { - return new DataFrameTransform(transformId, 
transformVersion, frequency); + private static TransformTaskParams createTransform(String transformId, Version transformVersion, TimeValue frequency) { + return new TransformTaskParams(transformId, transformVersion, frequency); } @SuppressWarnings("unchecked") - private static PersistentTasksCustomMetaData.PersistentTask getExistingTask(String id, ClusterState state) { + private static PersistentTasksCustomMetaData.PersistentTask getExistingTask(String id, ClusterState state) { PersistentTasksCustomMetaData pTasksMeta = state.getMetaData().custom(PersistentTasksCustomMetaData.TYPE); if (pTasksMeta == null) { return null; } - Collection> existingTask = pTasksMeta.findTasks(DataFrameTransform.NAME, + Collection> existingTask = pTasksMeta.findTasks(TransformTaskParams.NAME, t -> t.getId().equals(id)); if (existingTask.isEmpty()) { return null; } else { assert(existingTask.size() == 1); PersistentTasksCustomMetaData.PersistentTask pTask = existingTask.iterator().next(); - if (pTask.getParams() instanceof DataFrameTransform) { - return (PersistentTasksCustomMetaData.PersistentTask)pTask; + if (pTask.getParams() instanceof TransformTaskParams) { + return (PersistentTasksCustomMetaData.PersistentTask)pTask; } - throw new ElasticsearchStatusException("Found data frame transform persistent task [" + id + "] with incorrect params", + throw new ElasticsearchStatusException("Found transform persistent task [" + id + "] with incorrect params", RestStatus.INTERNAL_SERVER_ERROR); } } - private void cancelDataFrameTask(String taskId, String dataFrameId, Exception exception, Consumer onFailure) { + private void cancelTransformTask(String taskId, String transformId, Exception exception, Consumer onFailure) { persistentTasksService.sendRemoveRequest(taskId, new ActionListener>() { @Override public void onResponse(PersistentTasksCustomMetaData.PersistentTask task) { - // We succeeded in cancelling the persistent task, but the + // We succeeded in canceling the persistent task, but the // problem that caused us to cancel it is the overall result onFailure.accept(exception); } @Override public void onFailure(Exception e) { - logger.error("[" + dataFrameId + "] Failed to cancel persistent task that could " + + logger.error("[" + transformId + "] Failed to cancel persistent task that could " + "not be assigned due to [" + exception.getMessage() + "]", e); onFailure.accept(exception); } @@ -297,19 +280,19 @@ public class TransportStartDataFrameTransformAction extends ); } - private void waitForDataFrameTaskStarted(String taskId, - DataFrameTransform params, + private void waitForTransformTaskStarted(String taskId, + TransformTaskParams params, TimeValue timeout, ActionListener listener) { - DataFramePredicate predicate = new DataFramePredicate(); + TransformPredicate predicate = new TransformPredicate(); persistentTasksService.waitForPersistentTaskCondition(taskId, predicate, timeout, - new PersistentTasksService.WaitForPersistentTaskListener() { + new PersistentTasksService.WaitForPersistentTaskListener() { @Override - public void onResponse(PersistentTasksCustomMetaData.PersistentTask + public void onResponse(PersistentTasksCustomMetaData.PersistentTask persistentTask) { if (predicate.exception != null) { // We want to return to the caller without leaving an unassigned persistent task - cancelDataFrameTask(taskId, params.getId(), predicate.exception, listener::onFailure); + cancelTransformTask(taskId, params.getId(), predicate.exception, listener::onFailure); } else { listener.onResponse(true); } @@ -322,7 
+305,7 @@ public class TransportStartDataFrameTransformAction extends @Override public void onTimeout(TimeValue timeout) { - listener.onFailure(new ElasticsearchException("Starting dataframe [" + listener.onFailure(new ElasticsearchException("Starting transform [" + params.getId() + "] timed out after [" + timeout + "]")); } }); @@ -332,7 +315,7 @@ public class TransportStartDataFrameTransformAction extends * Important: the methods of this class must NOT throw exceptions. If they did then the callers * of endpoints waiting for a condition tested by this predicate would never get a response. */ - private class DataFramePredicate implements Predicate> { + private class TransformPredicate implements Predicate> { private volatile Exception exception; @@ -347,7 +330,7 @@ public class TransportStartDataFrameTransformAction extends assignment.isAssigned() == false) { // For some reason, the task is not assigned to a node, but is no longer in the `INITIAL_ASSIGNMENT` state // Consider this a failure. - exception = new ElasticsearchStatusException("Could not start dataframe, allocation explanation [" + + exception = new ElasticsearchStatusException("Could not start transform, allocation explanation [" + assignment.getExplanation() + "]", RestStatus.TOO_MANY_REQUESTS); return true; } @@ -359,8 +342,8 @@ public class TransportStartDataFrameTransformAction extends // But if it is in a failed state, _stats will show as much and give good reason to the user. // If it is not able to be assigned to a node all together, we should just close the task completely private boolean isNotStopped(PersistentTasksCustomMetaData.PersistentTask task) { - DataFrameTransformState state = (DataFrameTransformState)task.getState(); - return state != null && state.getTaskState().equals(DataFrameTransformTaskState.STOPPED) == false; + TransformState state = (TransformState)task.getState(); + return state != null && state.getTaskState().equals(TransformTaskState.STOPPED) == false; } } } diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStopDataFrameTransformAction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportStopTransformAction.java similarity index 78% rename from x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStopDataFrameTransformAction.java rename to x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportStopTransformAction.java index 721bf1825d4..0cbba5ef46d 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStopDataFrameTransformAction.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportStopTransformAction.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.dataframe.action; +package org.elasticsearch.xpack.transform.action; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -30,15 +30,15 @@ import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.action.util.PageParams; -import org.elasticsearch.xpack.core.dataframe.DataFrameMessages; -import org.elasticsearch.xpack.core.dataframe.action.StopDataFrameTransformAction; -import org.elasticsearch.xpack.core.dataframe.action.StopDataFrameTransformAction.Request; -import org.elasticsearch.xpack.core.dataframe.action.StopDataFrameTransformAction.Response; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformState; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformTaskState; -import org.elasticsearch.xpack.dataframe.persistence.DataFrameInternalIndex; -import org.elasticsearch.xpack.dataframe.persistence.DataFrameTransformsConfigManager; -import org.elasticsearch.xpack.dataframe.transforms.DataFrameTransformTask; +import org.elasticsearch.xpack.core.transform.TransformMessages; +import org.elasticsearch.xpack.core.transform.action.StopTransformAction; +import org.elasticsearch.xpack.core.transform.action.StopTransformAction.Request; +import org.elasticsearch.xpack.core.transform.action.StopTransformAction.Response; +import org.elasticsearch.xpack.core.transform.transforms.TransformState; +import org.elasticsearch.xpack.core.transform.transforms.TransformTaskState; +import org.elasticsearch.xpack.transform.persistence.TransformInternalIndex; +import org.elasticsearch.xpack.transform.persistence.TransformConfigManager; +import org.elasticsearch.xpack.transform.transforms.TransformTask; import java.util.ArrayList; import java.util.HashSet; @@ -47,27 +47,27 @@ import java.util.Map; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; -import static org.elasticsearch.xpack.core.dataframe.DataFrameMessages.DATA_FRAME_CANNOT_STOP_FAILED_TRANSFORM; +import static org.elasticsearch.xpack.core.transform.TransformMessages.CANNOT_STOP_FAILED_TRANSFORM; -public class TransportStopDataFrameTransformAction extends TransportTasksAction { +public class TransportStopTransformAction extends TransportTasksAction { - private static final Logger logger = LogManager.getLogger(TransportStopDataFrameTransformAction.class); + private static final Logger logger = LogManager.getLogger(TransportStopTransformAction.class); private final ThreadPool threadPool; - private final DataFrameTransformsConfigManager dataFrameTransformsConfigManager; + private final TransformConfigManager transformsConfigManager; private final PersistentTasksService persistentTasksService; private final Client client; @Inject - public TransportStopDataFrameTransformAction(TransportService transportService, ActionFilters actionFilters, - ClusterService clusterService, ThreadPool threadPool, - PersistentTasksService persistentTasksService, - DataFrameTransformsConfigManager dataFrameTransformsConfigManager, - Client client) { - super(StopDataFrameTransformAction.NAME, clusterService, transportService, actionFilters, Request::new, + public TransportStopTransformAction(TransportService transportService, ActionFilters actionFilters, + ClusterService clusterService, ThreadPool threadPool, + PersistentTasksService persistentTasksService, + TransformConfigManager transformsConfigManager, + Client client) { + 
super(StopTransformAction.NAME, clusterService, transportService, actionFilters, Request::new, Response::new, Response::new, ThreadPool.Names.SAME); this.threadPool = threadPool; - this.dataFrameTransformsConfigManager = dataFrameTransformsConfigManager; + this.transformsConfigManager = transformsConfigManager; this.persistentTasksService = persistentTasksService; this.client = client; } @@ -80,19 +80,19 @@ public class TransportStopDataFrameTransformAction extends TransportTasksAction< for (String transformId : transformIds) { PersistentTasksCustomMetaData.PersistentTask dfTask = tasks.getTask(transformId); if (dfTask != null - && dfTask.getState() instanceof DataFrameTransformState - && ((DataFrameTransformState) dfTask.getState()).getTaskState() == DataFrameTransformTaskState.FAILED) { + && dfTask.getState() instanceof TransformState + && ((TransformState) dfTask.getState()).getTaskState() == TransformTaskState.FAILED) { failedTasks.add(transformId); - failedReasons.add(((DataFrameTransformState) dfTask.getState()).getReason()); + failedReasons.add(((TransformState) dfTask.getState()).getReason()); } } if (failedTasks.isEmpty() == false) { String msg = failedTasks.size() == 1 ? - DataFrameMessages.getMessage(DATA_FRAME_CANNOT_STOP_FAILED_TRANSFORM, + TransformMessages.getMessage(CANNOT_STOP_FAILED_TRANSFORM, failedTasks.get(0), failedReasons.get(0)) : - "Unable to stop data frame transforms. The following transforms are in a failed state " + - failedTasks + " with reasons " + failedReasons + ". Use force stop to stop the data frame transforms."; + "Unable to stop transforms. The following transforms are in a failed state " + + failedTasks + " with reasons " + failedReasons + ". Use force stop to stop the transforms."; throw new ElasticsearchStatusException(msg, RestStatus.CONFLICT); } } @@ -103,7 +103,7 @@ public class TransportStopDataFrameTransformAction extends TransportTasksAction< final ClusterState state = clusterService.state(); final DiscoveryNodes nodes = state.nodes(); if (nodes.isLocalNodeElectedMaster() == false) { - // Delegates stop data frame to elected master node so it becomes the coordinating node. + // Delegates stop transform to elected master node so it becomes the coordinating node. 
if (nodes.getMasterNode() == null) { listener.onFailure(new MasterNotDiscoveredException("no known master node")); } else { @@ -118,13 +118,13 @@ public class TransportStopDataFrameTransformAction extends TransportTasksAction< finalListener = listener; } - dataFrameTransformsConfigManager.expandTransformIds(request.getId(), + transformsConfigManager.expandTransformIds(request.getId(), new PageParams(0, 10_000), request.isAllowNoMatch(), ActionListener.wrap(hitsAndIds -> { validateTaskState(state, hitsAndIds.v2(), request.isForce()); request.setExpandedIds(new HashSet<>(hitsAndIds.v2())); - request.setNodes(DataFrameNodes.dataFrameTaskNodes(hitsAndIds.v2(), state)); + request.setNodes(TransformNodes.transformTaskNodes(hitsAndIds.v2(), state)); super.doExecute(task, request, finalListener); }, listener::onFailure @@ -133,7 +133,7 @@ public class TransportStopDataFrameTransformAction extends TransportTasksAction< } @Override - protected void taskOperation(Request request, DataFrameTransformTask transformTask, ActionListener listener) { + protected void taskOperation(Request request, TransformTask transformTask, ActionListener listener) { Set ids = request.getExpandedIds(); if (ids == null) { @@ -150,13 +150,13 @@ public class TransportStopDataFrameTransformAction extends TransportTasksAction< } listener.onResponse(new Response(Boolean.TRUE)); } else { - listener.onFailure(new RuntimeException("ID of data frame indexer task [" + transformTask.getTransformId() + listener.onFailure(new RuntimeException("ID of transform task [" + transformTask.getTransformId() + "] does not match request's ID [" + request.getId() + "]")); } } @Override - protected StopDataFrameTransformAction.Response newResponse(Request request, + protected StopTransformAction.Response newResponse(Request request, List tasks, List taskOperationFailures, List failedNodeExceptions) { @@ -175,7 +175,7 @@ public class TransportStopDataFrameTransformAction extends TransportTasksAction< waitResponse -> client.admin() .indices() - .prepareRefresh(DataFrameInternalIndex.LATEST_INDEX_NAME) + .prepareRefresh(TransformInternalIndex.LATEST_INDEX_NAME) .execute(ActionListener.wrap( r -> listener.onResponse(waitResponse), e -> { @@ -190,13 +190,13 @@ public class TransportStopDataFrameTransformAction extends TransportTasksAction< // Wait until the persistent task is stopped // Switch over to Generic threadpool so we don't block the network thread threadPool.generic().execute(() -> - waitForDataFrameStopped(request.getExpandedIds(), request.getTimeout(), request.isForce(), onStopListener)); + waitForTransformStopped(request.getExpandedIds(), request.getTimeout(), request.isForce(), onStopListener)); }, listener::onFailure ); } - private void waitForDataFrameStopped(Set persistentTaskIds, + private void waitForTransformStopped(Set persistentTaskIds, TimeValue timeout, boolean force, ActionListener listener) { @@ -214,10 +214,10 @@ public class TransportStopDataFrameTransformAction extends TransportTasksAction< } // If force is true, then it should eventually go away, don't add it to the collection of failures. 
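Both validateTaskState and waitForTransformStopped apply the same rule: a transform whose persistent task is in the FAILED state can only be stopped when force is set; otherwise the request is rejected with 409 CONFLICT. A hypothetical helper expressing just that rule (not present in the patch) would be:

    // Hypothetical helper, not part of the patch: a FAILED transform may only be
    // stopped with force=true; anything else (including a null/unknown state) may stop.
    static boolean canStop(TransformTaskState taskState, boolean force) {
        return force || taskState != TransformTaskState.FAILED;
    }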
- DataFrameTransformState taskState = (DataFrameTransformState)transformsTask.getState(); - if (force == false && taskState != null && taskState.getTaskState() == DataFrameTransformTaskState.FAILED) { + TransformState taskState = (TransformState)transformsTask.getState(); + if (force == false && taskState != null && taskState.getTaskState() == TransformTaskState.FAILED) { exceptions.put(persistentTaskId, new ElasticsearchStatusException( - DataFrameMessages.getMessage(DATA_FRAME_CANNOT_STOP_FAILED_TRANSFORM, + TransformMessages.getMessage(CANNOT_STOP_FAILED_TRANSFORM, persistentTaskId, taskState.getReason()), RestStatus.CONFLICT)); diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportUpdateDataFrameTransformAction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportUpdateTransformAction.java similarity index 72% rename from x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportUpdateDataFrameTransformAction.java rename to x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportUpdateTransformAction.java index 9b81efdd056..34f7bd1fdc8 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportUpdateDataFrameTransformAction.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportUpdateTransformAction.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.dataframe.action; +package org.elasticsearch.xpack.transform.action; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -33,24 +33,24 @@ import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.XPackField; import org.elasticsearch.xpack.core.XPackPlugin; import org.elasticsearch.xpack.core.XPackSettings; -import org.elasticsearch.xpack.core.dataframe.DataFrameMessages; -import org.elasticsearch.xpack.core.dataframe.action.UpdateDataFrameTransformAction; -import org.elasticsearch.xpack.core.dataframe.action.UpdateDataFrameTransformAction.Request; -import org.elasticsearch.xpack.core.dataframe.action.UpdateDataFrameTransformAction.Response; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfigUpdate; import org.elasticsearch.xpack.core.security.SecurityContext; import org.elasticsearch.xpack.core.security.action.user.HasPrivilegesAction; import org.elasticsearch.xpack.core.security.action.user.HasPrivilegesRequest; import org.elasticsearch.xpack.core.security.action.user.HasPrivilegesResponse; import org.elasticsearch.xpack.core.security.authz.permission.ResourcePrivileges; import org.elasticsearch.xpack.core.security.support.Exceptions; -import org.elasticsearch.xpack.dataframe.notifications.DataFrameAuditor; -import org.elasticsearch.xpack.dataframe.persistence.DataFrameTransformsConfigManager; -import org.elasticsearch.xpack.dataframe.persistence.DataframeIndex; -import org.elasticsearch.xpack.dataframe.persistence.SeqNoPrimaryTermAndIndex; -import org.elasticsearch.xpack.dataframe.transforms.SourceDestValidator; -import org.elasticsearch.xpack.dataframe.transforms.pivot.Pivot; +import org.elasticsearch.xpack.core.transform.TransformMessages; +import org.elasticsearch.xpack.core.transform.action.UpdateTransformAction; +import 
org.elasticsearch.xpack.core.transform.action.UpdateTransformAction.Request; +import org.elasticsearch.xpack.core.transform.action.UpdateTransformAction.Response; +import org.elasticsearch.xpack.core.transform.transforms.TransformConfig; +import org.elasticsearch.xpack.core.transform.transforms.TransformConfigUpdate; +import org.elasticsearch.xpack.transform.notifications.TransformAuditor; +import org.elasticsearch.xpack.transform.persistence.TransformConfigManager; +import org.elasticsearch.xpack.transform.persistence.TransformIndex; +import org.elasticsearch.xpack.transform.persistence.SeqNoPrimaryTermAndIndex; +import org.elasticsearch.xpack.transform.transforms.SourceDestValidator; +import org.elasticsearch.xpack.transform.transforms.pivot.Pivot; import java.io.IOException; import java.time.Clock; @@ -58,28 +58,28 @@ import java.util.List; import java.util.Map; import java.util.stream.Collectors; -import static org.elasticsearch.xpack.dataframe.action.TransportPutDataFrameTransformAction.buildPrivilegeCheck; +import static org.elasticsearch.xpack.transform.action.TransportPutTransformAction.buildPrivilegeCheck; -public class TransportUpdateDataFrameTransformAction extends TransportMasterNodeAction { +public class TransportUpdateTransformAction extends TransportMasterNodeAction { - private static final Logger logger = LogManager.getLogger(TransportUpdateDataFrameTransformAction.class); + private static final Logger logger = LogManager.getLogger(TransportUpdateTransformAction.class); private final XPackLicenseState licenseState; private final Client client; - private final DataFrameTransformsConfigManager dataFrameTransformsConfigManager; + private final TransformConfigManager transformsConfigManager; private final SecurityContext securityContext; - private final DataFrameAuditor auditor; + private final TransformAuditor auditor; @Inject - public TransportUpdateDataFrameTransformAction(Settings settings, TransportService transportService, ThreadPool threadPool, - ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, - ClusterService clusterService, XPackLicenseState licenseState, - DataFrameTransformsConfigManager dataFrameTransformsConfigManager, Client client, - DataFrameAuditor auditor) { - super(UpdateDataFrameTransformAction.NAME, transportService, clusterService, threadPool, actionFilters, + public TransportUpdateTransformAction(Settings settings, TransportService transportService, ThreadPool threadPool, + ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, + ClusterService clusterService, XPackLicenseState licenseState, + TransformConfigManager transformsConfigManager, Client client, + TransformAuditor auditor) { + super(UpdateTransformAction.NAME, transportService, clusterService, threadPool, actionFilters, Request::new, indexNameExpressionResolver); this.licenseState = licenseState; this.client = client; - this.dataFrameTransformsConfigManager = dataFrameTransformsConfigManager; + this.transformsConfigManager = transformsConfigManager; this.securityContext = XPackSettings.SECURITY_ENABLED.get(settings) ? 
new SecurityContext(settings, threadPool.getThreadContext()) : null; this.auditor = auditor; @@ -98,33 +98,33 @@ public class TransportUpdateDataFrameTransformAction extends TransportMasterNode @Override protected void masterOperation(Request request, ClusterState clusterState, ActionListener listener) { - if (!licenseState.isDataFrameAllowed()) { - listener.onFailure(LicenseUtils.newComplianceException(XPackField.DATA_FRAME)); + if (!licenseState.isTransformAllowed()) { + listener.onFailure(LicenseUtils.newComplianceException(XPackField.TRANSFORM)); return; } XPackPlugin.checkReadyForXPackCustomMetadata(clusterState); - // set headers to run data frame transform as calling user + // set headers to run transform as calling user Map filteredHeaders = threadPool.getThreadContext().getHeaders().entrySet().stream() .filter(e -> ClientHelper.SECURITY_HEADER_FILTERS.contains(e.getKey())) .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); - DataFrameTransformConfigUpdate update = request.getUpdate(); + TransformConfigUpdate update = request.getUpdate(); update.setHeaders(filteredHeaders); // GET transform and attempt to update // We don't want the update to complete if the config changed between GET and INDEX - dataFrameTransformsConfigManager.getTransformConfigurationForUpdate(request.getId(), ActionListener.wrap( + transformsConfigManager.getTransformConfigurationForUpdate(request.getId(), ActionListener.wrap( configAndVersion -> { - final DataFrameTransformConfig config = configAndVersion.v1(); + final TransformConfig config = configAndVersion.v1(); // If it is a noop don't bother even writing the doc, save the cycles, just return here. if (update.isNoop(config)) { listener.onResponse(new Response(config)); return; } - DataFrameTransformConfig updatedConfig = update.apply(config); - validateAndUpdateDataFrame(request, clusterState, updatedConfig, configAndVersion.v2(), listener); + TransformConfig updatedConfig = update.apply(config); + validateAndUpdateTransform(request, clusterState, updatedConfig, configAndVersion.v2(), listener); }, listener::onFailure )); @@ -137,13 +137,13 @@ public class TransportUpdateDataFrameTransformAction extends TransportMasterNode private void handlePrivsResponse(String username, Request request, - DataFrameTransformConfig config, + TransformConfig config, SeqNoPrimaryTermAndIndex seqNoPrimaryTermAndIndex, ClusterState clusterState, HasPrivilegesResponse privilegesResponse, ActionListener listener) { if (privilegesResponse.isCompleteMatch()) { - updateDataFrame(request, config, seqNoPrimaryTermAndIndex, clusterState, listener); + updateTransform(request, config, seqNoPrimaryTermAndIndex, clusterState, listener); } else { List indices = privilegesResponse.getIndexPrivileges() .stream() @@ -151,16 +151,16 @@ public class TransportUpdateDataFrameTransformAction extends TransportMasterNode .collect(Collectors.toList()); listener.onFailure(Exceptions.authorizationError( - "Cannot update data frame transform [{}] because user {} lacks all the required permissions for indices: {}", + "Cannot update transform [{}] because user {} lacks all the required permissions for indices: {}", request.getId(), username, indices)); } } - private void validateAndUpdateDataFrame(Request request, + private void validateAndUpdateTransform(Request request, ClusterState clusterState, - DataFrameTransformConfig config, + TransformConfig config, SeqNoPrimaryTermAndIndex seqNoPrimaryTermAndIndex, ActionListener listener) { try { @@ -181,11 +181,11 @@ public class 
TransportUpdateDataFrameTransformAction extends TransportMasterNode client.execute(HasPrivilegesAction.INSTANCE, privRequest, privResponseListener); } else { // No security enabled, just create the transform - updateDataFrame(request, config, seqNoPrimaryTermAndIndex, clusterState, listener); + updateTransform(request, config, seqNoPrimaryTermAndIndex, clusterState, listener); } } - private void updateDataFrame(Request request, - DataFrameTransformConfig config, + private void updateTransform(Request request, + TransformConfig config, SeqNoPrimaryTermAndIndex seqNoPrimaryTermAndIndex, ClusterState clusterState, ActionListener listener) { @@ -195,8 +195,8 @@ public class TransportUpdateDataFrameTransformAction extends TransportMasterNode // <3> Return to the listener ActionListener putTransformConfigurationListener = ActionListener.wrap( putTransformConfigurationResult -> { - auditor.info(config.getId(), "updated data frame transform."); - dataFrameTransformsConfigManager.deleteOldTransformConfigurations(request.getId(), ActionListener.wrap( + auditor.info(config.getId(), "updated transform."); + transformsConfigManager.deleteOldTransformConfigurations(request.getId(), ActionListener.wrap( r -> { logger.trace("[{}] successfully deleted old transform configurations", request.getId()); listener.onResponse(new Response(config)); @@ -216,7 +216,7 @@ public class TransportUpdateDataFrameTransformAction extends TransportMasterNode // <2> Update our transform ActionListener createDestinationListener = ActionListener.wrap( - createDestResponse -> dataFrameTransformsConfigManager.updateTransformConfiguration(config, + createDestResponse -> transformsConfigManager.updateTransformConfiguration(config, seqNoPrimaryTermAndIndex, putTransformConfigurationListener), listener::onFailure @@ -246,12 +246,12 @@ public class TransportUpdateDataFrameTransformAction extends TransportMasterNode validationException -> { if (validationException instanceof ElasticsearchStatusException) { listener.onFailure(new ElasticsearchStatusException( - DataFrameMessages.REST_PUT_DATA_FRAME_FAILED_TO_VALIDATE_DATA_FRAME_CONFIGURATION, + TransformMessages.REST_PUT_TRANSFORM_FAILED_TO_VALIDATE_CONFIGURATION, ((ElasticsearchStatusException)validationException).status(), validationException)); } else { listener.onFailure(new ElasticsearchStatusException( - DataFrameMessages.REST_PUT_DATA_FRAME_FAILED_TO_VALIDATE_DATA_FRAME_CONFIGURATION, + TransformMessages.REST_PUT_TRANSFORM_FAILED_TO_VALIDATE_CONFIGURATION, RestStatus.INTERNAL_SERVER_ERROR, validationException)); } @@ -262,13 +262,13 @@ public class TransportUpdateDataFrameTransformAction extends TransportMasterNode pivot.validateConfig(); } catch (ElasticsearchStatusException e) { listener.onFailure(new ElasticsearchStatusException( - DataFrameMessages.REST_PUT_DATA_FRAME_FAILED_TO_VALIDATE_DATA_FRAME_CONFIGURATION, + TransformMessages.REST_PUT_TRANSFORM_FAILED_TO_VALIDATE_CONFIGURATION, e.status(), e)); return; } catch (Exception e) { listener.onFailure(new ElasticsearchStatusException( - DataFrameMessages.REST_PUT_DATA_FRAME_FAILED_TO_VALIDATE_DATA_FRAME_CONFIGURATION, RestStatus.INTERNAL_SERVER_ERROR, e)); + TransformMessages.REST_PUT_TRANSFORM_FAILED_TO_VALIDATE_CONFIGURATION, RestStatus.INTERNAL_SERVER_ERROR, e)); return; } @@ -280,16 +280,16 @@ public class TransportUpdateDataFrameTransformAction extends TransportMasterNode } } - private void createDestination(Pivot pivot, DataFrameTransformConfig config, ActionListener listener) { + private void createDestination(Pivot 
pivot, TransformConfig config, ActionListener listener) { ActionListener> deduceMappingsListener = ActionListener.wrap( - mappings -> DataframeIndex.createDestinationIndex( + mappings -> TransformIndex.createDestinationIndex( client, Clock.systemUTC(), config, mappings, ActionListener.wrap(r -> listener.onResponse(null), listener::onFailure)), deduceTargetMappingsException -> listener.onFailure( - new RuntimeException(DataFrameMessages.REST_PUT_DATA_FRAME_FAILED_TO_DEDUCE_DEST_MAPPINGS, + new RuntimeException(TransformMessages.REST_PUT_TRANSFORM_FAILED_TO_DEDUCE_DEST_MAPPINGS, deduceTargetMappingsException)) ); diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/checkpoint/CheckpointException.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/checkpoint/CheckpointException.java similarity index 91% rename from x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/checkpoint/CheckpointException.java rename to x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/checkpoint/CheckpointException.java index f8405d37057..6f7a778d301 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/checkpoint/CheckpointException.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/checkpoint/CheckpointException.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.dataframe.checkpoint; +package org.elasticsearch.xpack.transform.checkpoint; import org.elasticsearch.ElasticsearchException; diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/checkpoint/CheckpointProvider.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/checkpoint/CheckpointProvider.java similarity index 51% rename from x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/checkpoint/CheckpointProvider.java rename to x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/checkpoint/CheckpointProvider.java index 8dcab5879fb..5fca07cef2b 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/checkpoint/CheckpointProvider.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/checkpoint/CheckpointProvider.java @@ -4,13 +4,13 @@ * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.dataframe.checkpoint; +package org.elasticsearch.xpack.transform.checkpoint; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerPosition; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpoint; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpointingInfo; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformProgress; +import org.elasticsearch.xpack.core.transform.transforms.TransformIndexerPosition; +import org.elasticsearch.xpack.core.transform.transforms.TransformCheckpoint; +import org.elasticsearch.xpack.core.transform.transforms.TransformCheckpointingInfo; +import org.elasticsearch.xpack.core.transform.transforms.TransformProgress; /** * Interface for checkpoint creation, checking for changes and getting statistics about checkpoints @@ -23,20 +23,20 @@ public interface CheckpointProvider { * @param lastCheckpoint the last checkpoint * @param listener listener to call after inner request returned */ - void createNextCheckpoint(DataFrameTransformCheckpoint lastCheckpoint, ActionListener listener); + void createNextCheckpoint(TransformCheckpoint lastCheckpoint, ActionListener listener); /** - * Determines whether the data frame needs updating + * Determines whether the transform needs updating * * @param lastCheckpoint the last checkpoint * @param listener listener to send the result to */ - void sourceHasChanged(DataFrameTransformCheckpoint lastCheckpoint, ActionListener listener); + void sourceHasChanged(TransformCheckpoint lastCheckpoint, ActionListener listener); /** * Get checkpoint statistics for a running data frame * - * For running data frames most information is available in-memory. + * For running transforms most information is available in-memory. * * @param lastCheckpoint the last checkpoint * @param nextCheckpoint the next checkpoint @@ -44,16 +44,16 @@ public interface CheckpointProvider { * @param nextCheckpointProgress progress for the next checkpoint * @param listener listener to retrieve the result */ - void getCheckpointingInfo(DataFrameTransformCheckpoint lastCheckpoint, - DataFrameTransformCheckpoint nextCheckpoint, - DataFrameIndexerPosition nextCheckpointPosition, - DataFrameTransformProgress nextCheckpointProgress, - ActionListener listener); + void getCheckpointingInfo(TransformCheckpoint lastCheckpoint, + TransformCheckpoint nextCheckpoint, + TransformIndexerPosition nextCheckpointPosition, + TransformProgress nextCheckpointProgress, + ActionListener listener); /** * Get checkpoint statistics for a stopped data frame * - * For stopped data frames we need to do lookups in the internal index. + * For stopped transforms we need to do lookups in the internal index. 
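A hedged sketch of how a caller might consume this interface for change detection, using only the signatures shown above; the surrounding indexer code is not part of this hunk, so the call site, the checkpointProvider/lastCheckpoint variables, and the logger are assumptions for illustration.

    // Illustrative call site, not part of the patch: ask the provider whether the
    // source indices have changed since the last checkpoint; the answer arrives
    // asynchronously on the ActionListener.
    checkpointProvider.sourceHasChanged(lastCheckpoint, ActionListener.wrap(
        changed -> {
            if (changed) {
                // in the real indexer a new checkpoint would then be requested via
                // createNextCheckpoint(lastCheckpoint, ...)
            }
        },
        e -> logger.warn("change detection failed for transform checkpointing", e)));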
* * @param lastCheckpointNumber the last checkpoint number * @param nextCheckpointPosition position for the next checkpoint @@ -61,7 +61,7 @@ public interface CheckpointProvider { * @param listener listener to retrieve the result */ void getCheckpointingInfo(long lastCheckpointNumber, - DataFrameIndexerPosition nextCheckpointPosition, - DataFrameTransformProgress nextCheckpointProgress, - ActionListener listener); + TransformIndexerPosition nextCheckpointPosition, + TransformProgress nextCheckpointProgress, + ActionListener listener); } diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/checkpoint/DefaultCheckpointProvider.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/checkpoint/DefaultCheckpointProvider.java similarity index 70% rename from x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/checkpoint/DefaultCheckpointProvider.java rename to x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/checkpoint/DefaultCheckpointProvider.java index 5464304d5b8..e624f2e6270 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/checkpoint/DefaultCheckpointProvider.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/checkpoint/DefaultCheckpointProvider.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.dataframe.checkpoint; +package org.elasticsearch.xpack.transform.checkpoint; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -20,14 +20,14 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.Client; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.xpack.core.ClientHelper; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerPosition; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpoint; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpointStats; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpointingInfo; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformProgress; -import org.elasticsearch.xpack.dataframe.notifications.DataFrameAuditor; -import org.elasticsearch.xpack.dataframe.persistence.DataFrameTransformsConfigManager; +import org.elasticsearch.xpack.core.transform.transforms.TransformIndexerPosition; +import org.elasticsearch.xpack.core.transform.transforms.TransformCheckpoint; +import org.elasticsearch.xpack.core.transform.transforms.TransformCheckpointStats; +import org.elasticsearch.xpack.core.transform.transforms.TransformCheckpointingInfo; +import org.elasticsearch.xpack.core.transform.transforms.TransformConfig; +import org.elasticsearch.xpack.core.transform.transforms.TransformProgress; +import org.elasticsearch.xpack.transform.notifications.TransformAuditor; +import org.elasticsearch.xpack.transform.persistence.TransformConfigManager; import java.util.Arrays; import java.util.Collections; @@ -44,60 +44,60 @@ public class DefaultCheckpointProvider implements CheckpointProvider { /** * Builder for collecting checkpointing information for the purpose of _stats */ - private static class DataFrameTransformCheckpointingInfoBuilder { - private DataFrameIndexerPosition nextCheckpointPosition; - private 
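Aside on the renamed CheckpointProvider contract above: the sketch below is a simplified, self-contained stand-in (not the real org.elasticsearch.xpack classes) showing how a caller is expected to drive the two async entry points, change detection followed by checkpoint creation. Listener, Checkpoint and SimpleCheckpointProvider are illustrative placeholders only.

import java.util.function.Consumer;

// Illustrative stand-ins only; the real types live under org.elasticsearch.xpack.core.transform.transforms.
interface Listener<T> {
    void onResponse(T result);
    void onFailure(Exception e);

    static <T> Listener<T> wrap(Consumer<T> onResponse, Consumer<Exception> onFailure) {
        return new Listener<T>() {
            @Override public void onResponse(T result) { onResponse.accept(result); }
            @Override public void onFailure(Exception e) { onFailure.accept(e); }
        };
    }
}

record Checkpoint(String transformId, long number) {}

interface SimpleCheckpointProvider {
    void createNextCheckpoint(Checkpoint lastCheckpoint, Listener<Checkpoint> listener);
    void sourceHasChanged(Checkpoint lastCheckpoint, Listener<Boolean> listener);
}

class CheckpointProviderUsage {
    // Ask whether the source moved on since the last checkpoint and, only if it did,
    // build the next checkpoint, the same order a continuous transform would use.
    static void maybeAdvance(SimpleCheckpointProvider provider, Checkpoint lastCheckpoint) {
        provider.sourceHasChanged(lastCheckpoint, Listener.wrap(
            changed -> {
                if (changed) {
                    provider.createNextCheckpoint(lastCheckpoint, Listener.wrap(
                        next -> System.out.println("created checkpoint " + next.number()),
                        e -> System.err.println("checkpoint creation failed: " + e)));
                }
            },
            e -> System.err.println("change detection failed: " + e)));
    }
}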
DataFrameTransformProgress nextCheckpointProgress; - private DataFrameTransformCheckpoint lastCheckpoint; - private DataFrameTransformCheckpoint nextCheckpoint; - private DataFrameTransformCheckpoint sourceCheckpoint; + private static class TransformCheckpointingInfoBuilder { + private TransformIndexerPosition nextCheckpointPosition; + private TransformProgress nextCheckpointProgress; + private TransformCheckpoint lastCheckpoint; + private TransformCheckpoint nextCheckpoint; + private TransformCheckpoint sourceCheckpoint; - DataFrameTransformCheckpointingInfoBuilder() { + TransformCheckpointingInfoBuilder() { } - DataFrameTransformCheckpointingInfo build() { + TransformCheckpointingInfo build() { if (lastCheckpoint == null) { - lastCheckpoint = DataFrameTransformCheckpoint.EMPTY; + lastCheckpoint = TransformCheckpoint.EMPTY; } if (nextCheckpoint == null) { - nextCheckpoint = DataFrameTransformCheckpoint.EMPTY; + nextCheckpoint = TransformCheckpoint.EMPTY; } if (sourceCheckpoint == null) { - sourceCheckpoint = DataFrameTransformCheckpoint.EMPTY; + sourceCheckpoint = TransformCheckpoint.EMPTY; } // checkpointstats requires a non-negative checkpoint number long lastCheckpointNumber = lastCheckpoint.getCheckpoint() > 0 ? lastCheckpoint.getCheckpoint() : 0; long nextCheckpointNumber = nextCheckpoint.getCheckpoint() > 0 ? nextCheckpoint.getCheckpoint() : 0; - return new DataFrameTransformCheckpointingInfo( - new DataFrameTransformCheckpointStats(lastCheckpointNumber, null, null, + return new TransformCheckpointingInfo( + new TransformCheckpointStats(lastCheckpointNumber, null, null, lastCheckpoint.getTimestamp(), lastCheckpoint.getTimeUpperBound()), - new DataFrameTransformCheckpointStats(nextCheckpointNumber, nextCheckpointPosition, + new TransformCheckpointStats(nextCheckpointNumber, nextCheckpointPosition, nextCheckpointProgress, nextCheckpoint.getTimestamp(), nextCheckpoint.getTimeUpperBound()), - DataFrameTransformCheckpoint.getBehind(lastCheckpoint, sourceCheckpoint)); + TransformCheckpoint.getBehind(lastCheckpoint, sourceCheckpoint)); } - public DataFrameTransformCheckpointingInfoBuilder setLastCheckpoint(DataFrameTransformCheckpoint lastCheckpoint) { + public TransformCheckpointingInfoBuilder setLastCheckpoint(TransformCheckpoint lastCheckpoint) { this.lastCheckpoint = lastCheckpoint; return this; } - public DataFrameTransformCheckpointingInfoBuilder setNextCheckpoint(DataFrameTransformCheckpoint nextCheckpoint) { + public TransformCheckpointingInfoBuilder setNextCheckpoint(TransformCheckpoint nextCheckpoint) { this.nextCheckpoint = nextCheckpoint; return this; } - public DataFrameTransformCheckpointingInfoBuilder setSourceCheckpoint(DataFrameTransformCheckpoint sourceCheckpoint) { + public TransformCheckpointingInfoBuilder setSourceCheckpoint(TransformCheckpoint sourceCheckpoint) { this.sourceCheckpoint = sourceCheckpoint; return this; } - public DataFrameTransformCheckpointingInfoBuilder setNextCheckpointProgress(DataFrameTransformProgress nextCheckpointProgress) { + public TransformCheckpointingInfoBuilder setNextCheckpointProgress(TransformProgress nextCheckpointProgress) { this.nextCheckpointProgress = nextCheckpointProgress; return this; } - public DataFrameTransformCheckpointingInfoBuilder setNextCheckpointPosition(DataFrameIndexerPosition nextCheckpointPosition) { + public TransformCheckpointingInfoBuilder setNextCheckpointPosition(TransformIndexerPosition nextCheckpointPosition) { this.nextCheckpointPosition = nextCheckpointPosition; return this; } @@ -106,28 +106,28 @@ public 
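The renamed builder above normalizes its inputs before constructing the stats object: missing checkpoints fall back to the EMPTY sentinel and checkpoint numbers are clamped to be non-negative. A minimal, self-contained sketch of that defaulting pattern, with placeholder types rather than the real TransformCheckpoint classes:

// Placeholder sketch of the "default to EMPTY, clamp to >= 0" normalization used by the builder.
final class SimpleCheckpoint {
    static final SimpleCheckpoint EMPTY = new SimpleCheckpoint(-1L);
    private final long number;
    SimpleCheckpoint(long number) { this.number = number; }
    long number() { return number; }
}

final class CheckpointingInfoBuilder {
    private SimpleCheckpoint last;
    private SimpleCheckpoint next;

    CheckpointingInfoBuilder setLast(SimpleCheckpoint last) { this.last = last; return this; }
    CheckpointingInfoBuilder setNext(SimpleCheckpoint next) { this.next = next; return this; }

    String build() {
        // Null checkpoints become the EMPTY sentinel, mirroring the builder in DefaultCheckpointProvider.
        SimpleCheckpoint lastOrEmpty = last == null ? SimpleCheckpoint.EMPTY : last;
        SimpleCheckpoint nextOrEmpty = next == null ? SimpleCheckpoint.EMPTY : next;
        // The stats objects require non-negative checkpoint numbers, so sentinel values are clamped to 0.
        long lastNumber = Math.max(0L, lastOrEmpty.number());
        long nextNumber = Math.max(0L, nextOrEmpty.number());
        return "last=" + lastNumber + ", next=" + nextNumber;
    }
}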
class DefaultCheckpointProvider implements CheckpointProvider { private static final Logger logger = LogManager.getLogger(DefaultCheckpointProvider.class); protected final Client client; - protected final DataFrameTransformsConfigManager dataFrameTransformsConfigManager; - protected final DataFrameAuditor dataFrameAuditor; - protected final DataFrameTransformConfig transformConfig; + protected final TransformConfigManager transformConfigManager; + protected final TransformAuditor transformAuditor; + protected final TransformConfig transformConfig; public DefaultCheckpointProvider(final Client client, - final DataFrameTransformsConfigManager dataFrameTransformsConfigManager, - final DataFrameAuditor dataFrameAuditor, - final DataFrameTransformConfig transformConfig) { + final TransformConfigManager transformConfigManager, + final TransformAuditor transformAuditor, + final TransformConfig transformConfig) { this.client = client; - this.dataFrameTransformsConfigManager = dataFrameTransformsConfigManager; - this.dataFrameAuditor = dataFrameAuditor; + this.transformConfigManager = transformConfigManager; + this.transformAuditor = transformAuditor; this.transformConfig = transformConfig; } @Override - public void sourceHasChanged(final DataFrameTransformCheckpoint lastCheckpoint, final ActionListener listener) { + public void sourceHasChanged(final TransformCheckpoint lastCheckpoint, final ActionListener listener) { listener.onResponse(false); } @Override - public void createNextCheckpoint(final DataFrameTransformCheckpoint lastCheckpoint, - final ActionListener listener) { + public void createNextCheckpoint(final TransformCheckpoint lastCheckpoint, + final ActionListener listener) { final long timestamp = System.currentTimeMillis(); final long checkpoint = lastCheckpoint != null ? lastCheckpoint.getCheckpoint() + 1 : 1; @@ -135,7 +135,7 @@ public class DefaultCheckpointProvider implements CheckpointProvider { reportSourceIndexChanges(lastCheckpoint != null ? lastCheckpoint.getIndicesCheckpoints().keySet() : Collections.emptySet(), checkpointsByIndex.keySet()); - listener.onResponse(new DataFrameTransformCheckpoint(transformConfig.getId(), timestamp, checkpoint, checkpointsByIndex, 0L)); + listener.onResponse(new TransformCheckpoint(transformConfig.getId(), timestamp, checkpoint, checkpointsByIndex, 0L)); }, listener::onFailure)); } @@ -146,14 +146,14 @@ public class DefaultCheckpointProvider implements CheckpointProvider { .features(new GetIndexRequest.Feature[0]) .indicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN); - ClientHelper.executeWithHeadersAsync(transformConfig.getHeaders(), ClientHelper.DATA_FRAME_ORIGIN, client, GetIndexAction.INSTANCE, + ClientHelper.executeWithHeadersAsync(transformConfig.getHeaders(), ClientHelper.TRANSFORM_ORIGIN, client, GetIndexAction.INSTANCE, getIndexRequest, ActionListener.wrap(getIndexResponse -> { Set userIndices = getIndexResponse.getIndices() != null ? 
new HashSet<>(Arrays.asList(getIndexResponse.getIndices())) : Collections.emptySet(); // 2nd get stats request ClientHelper.executeAsyncWithOrigin(client, - ClientHelper.DATA_FRAME_ORIGIN, + ClientHelper.TRANSFORM_ORIGIN, IndicesStatsAction.INSTANCE, new IndicesStatsRequest() .indices(transformConfig.getSource().getIndex()) @@ -230,13 +230,13 @@ public class DefaultCheckpointProvider implements CheckpointProvider { } @Override - public void getCheckpointingInfo(DataFrameTransformCheckpoint lastCheckpoint, - DataFrameTransformCheckpoint nextCheckpoint, - DataFrameIndexerPosition nextCheckpointPosition, - DataFrameTransformProgress nextCheckpointProgress, - ActionListener listener) { + public void getCheckpointingInfo(TransformCheckpoint lastCheckpoint, + TransformCheckpoint nextCheckpoint, + TransformIndexerPosition nextCheckpointPosition, + TransformProgress nextCheckpointProgress, + ActionListener listener) { - DataFrameTransformCheckpointingInfoBuilder checkpointingInfoBuilder = new DataFrameTransformCheckpointingInfoBuilder(); + TransformCheckpointingInfoBuilder checkpointingInfoBuilder = new TransformCheckpointingInfoBuilder(); checkpointingInfoBuilder.setLastCheckpoint(lastCheckpoint) .setNextCheckpoint(nextCheckpoint) @@ -247,17 +247,17 @@ public class DefaultCheckpointProvider implements CheckpointProvider { getIndexCheckpoints(ActionListener.wrap(checkpointsByIndex -> { checkpointingInfoBuilder.setSourceCheckpoint( - new DataFrameTransformCheckpoint(transformConfig.getId(), timestamp, -1L, checkpointsByIndex, 0L)); + new TransformCheckpoint(transformConfig.getId(), timestamp, -1L, checkpointsByIndex, 0L)); listener.onResponse(checkpointingInfoBuilder.build()); }, listener::onFailure)); } @Override - public void getCheckpointingInfo(long lastCheckpointNumber, DataFrameIndexerPosition nextCheckpointPosition, - DataFrameTransformProgress nextCheckpointProgress, - ActionListener listener) { + public void getCheckpointingInfo(long lastCheckpointNumber, TransformIndexerPosition nextCheckpointPosition, + TransformProgress nextCheckpointProgress, + ActionListener listener) { - DataFrameTransformCheckpointingInfoBuilder checkpointingInfoBuilder = new DataFrameTransformCheckpointingInfoBuilder(); + TransformCheckpointingInfoBuilder checkpointingInfoBuilder = new TransformCheckpointingInfoBuilder(); checkpointingInfoBuilder.setNextCheckpointPosition(nextCheckpointPosition).setNextCheckpointProgress(nextCheckpointProgress); @@ -267,47 +267,47 @@ public class DefaultCheckpointProvider implements CheckpointProvider { ActionListener> checkpointsByIndexListener = ActionListener.wrap( checkpointsByIndex -> { checkpointingInfoBuilder.setSourceCheckpoint( - new DataFrameTransformCheckpoint(transformConfig.getId(), timestamp, -1L, checkpointsByIndex, 0L)); + new TransformCheckpoint(transformConfig.getId(), timestamp, -1L, checkpointsByIndex, 0L)); listener.onResponse(checkpointingInfoBuilder.build()); }, e -> { logger.debug((Supplier) () -> new ParameterizedMessage( - "Failed to retrieve source checkpoint for data frame [{}]", transformConfig.getId()), e); + "Failed to retrieve source checkpoint for transform [{}]", transformConfig.getId()), e); listener.onFailure(new CheckpointException("Failure during source checkpoint info retrieval", e)); } ); // <2> got the next checkpoint, get the source checkpoint - ActionListener nextCheckpointListener = ActionListener.wrap( + ActionListener nextCheckpointListener = ActionListener.wrap( nextCheckpointObj -> { 
checkpointingInfoBuilder.setNextCheckpoint(nextCheckpointObj); getIndexCheckpoints(checkpointsByIndexListener); }, e -> { logger.debug((Supplier) () -> new ParameterizedMessage( - "Failed to retrieve next checkpoint [{}] for data frame [{}]", lastCheckpointNumber + 1, + "Failed to retrieve next checkpoint [{}] for transform [{}]", lastCheckpointNumber + 1, transformConfig.getId()), e); listener.onFailure(new CheckpointException("Failure during next checkpoint info retrieval", e)); } ); // <1> got last checkpoint, get the next checkpoint - ActionListener lastCheckpointListener = ActionListener.wrap( + ActionListener lastCheckpointListener = ActionListener.wrap( lastCheckpointObj -> { checkpointingInfoBuilder.lastCheckpoint = lastCheckpointObj; - dataFrameTransformsConfigManager.getTransformCheckpoint(transformConfig.getId(), lastCheckpointNumber + 1, + transformConfigManager.getTransformCheckpoint(transformConfig.getId(), lastCheckpointNumber + 1, nextCheckpointListener); }, e -> { logger.debug((Supplier) () -> new ParameterizedMessage( - "Failed to retrieve last checkpoint [{}] for data frame [{}]", lastCheckpointNumber, + "Failed to retrieve last checkpoint [{}] for transform [{}]", lastCheckpointNumber, transformConfig.getId()), e); listener.onFailure(new CheckpointException("Failure during last checkpoint info retrieval", e)); } ); if (lastCheckpointNumber != 0) { - dataFrameTransformsConfigManager.getTransformCheckpoint(transformConfig.getId(), lastCheckpointNumber, lastCheckpointListener); + transformConfigManager.getTransformCheckpoint(transformConfig.getId(), lastCheckpointNumber, lastCheckpointListener); } else { getIndexCheckpoints(checkpointsByIndexListener); } @@ -324,7 +324,7 @@ public class DefaultCheckpointProvider implements CheckpointProvider { if (newSourceIndexes.isEmpty() && lastSourceIndexes.isEmpty() == false) { String message = "Source did not resolve to any open indexes"; logger.warn("{} for transform [{}]", message, transformConfig.getId()); - dataFrameAuditor.warning(transformConfig.getId(), message); + transformAuditor.warning(transformConfig.getId(), message); } else { Set removedIndexes = Sets.difference(lastSourceIndexes, newSourceIndexes); Set addedIndexes = Sets.difference(newSourceIndexes, lastSourceIndexes); @@ -333,11 +333,11 @@ public class DefaultCheckpointProvider implements CheckpointProvider { String message = "Source index resolve found more than " + AUDIT_CONCRETED_SOURCE_INDEX_CHANGES + " changes, [" + removedIndexes.size() + "] removed indexes, [" + addedIndexes.size() + "] new indexes"; logger.debug("{} for transform [{}]", message, transformConfig.getId()); - dataFrameAuditor.info(transformConfig.getId(), message); + transformAuditor.info(transformConfig.getId(), message); } else if (removedIndexes.size() + addedIndexes.size() > 0) { String message = "Source index resolve found changes, removedIndexes: " + removedIndexes + ", new indexes: " + addedIndexes; logger.debug("{} for transform [{}]", message, transformConfig.getId()); - dataFrameAuditor.info(transformConfig.getId(), message); + transformAuditor.info(transformConfig.getId(), message); } } } diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/checkpoint/TimeBasedCheckpointProvider.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/checkpoint/TimeBasedCheckpointProvider.java similarity index 71% rename from x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/checkpoint/TimeBasedCheckpointProvider.java 
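The stopped-transform stats path above wires three async steps together by constructing listeners in reverse order: the listener for step <2> is created first and handed to step <1>, so each callback triggers the next lookup. A self-contained sketch of that chaining style; the types and lookups here are simplified placeholders, not the exact Elasticsearch signatures.

import java.util.function.Consumer;

// Minimal async-callback sketch; org.elasticsearch.action.ActionListener.wrap(...) plays this role in the real code.
interface Callback<T> { void onResponse(T value); void onFailure(Exception e); }

class StoppedStatsLookup {
    static Callback<Long> wrap(Consumer<Long> ok, Consumer<Exception> err) {
        return new Callback<Long>() {
            @Override public void onResponse(Long v) { ok.accept(v); }
            @Override public void onFailure(Exception e) { err.accept(e); }
        };
    }

    // Pretend lookups; the real code searches the internal transform index for each step.
    static void fetchLastCheckpoint(long number, Callback<Long> listener) { listener.onResponse(number); }
    static void fetchNextCheckpoint(long number, Callback<Long> listener) { listener.onResponse(number); }
    static void fetchSourceCheckpoints(Callback<Long> listener) { listener.onResponse(0L); }

    static void getCheckpointingInfo(long lastCheckpointNumber, Consumer<String> done, Consumer<Exception> failed) {
        // <3> final step: gather the per-index source checkpoints and report.
        Callback<Long> sourceListener = wrap(
            source -> done.accept("last=" + lastCheckpointNumber + " source=" + source), failed);
        // <2> once the next checkpoint arrives, move on to the source checkpoints.
        Callback<Long> nextListener = wrap(next -> fetchSourceCheckpoints(sourceListener), failed);
        // <1> start with the last checkpoint, then ask for the next one.
        Callback<Long> lastListener = wrap(last -> fetchNextCheckpoint(lastCheckpointNumber + 1, nextListener), failed);

        if (lastCheckpointNumber != 0) {
            fetchLastCheckpoint(lastCheckpointNumber, lastListener);
        } else {
            fetchSourceCheckpoints(sourceListener); // nothing persisted yet, skip straight to the source lookup
        }
    }
}

Building the listeners back to front keeps each step's error handling local while the control flow still reads top to bottom, matching the numbered comments in the real implementation.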
rename to x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/checkpoint/TimeBasedCheckpointProvider.java index 276d3fe3bcc..13d01d7ce31 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/checkpoint/TimeBasedCheckpointProvider.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/checkpoint/TimeBasedCheckpointProvider.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.dataframe.checkpoint; +package org.elasticsearch.xpack.transform.checkpoint; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -18,11 +18,11 @@ import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.RangeQueryBuilder; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.xpack.core.ClientHelper; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpoint; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; -import org.elasticsearch.xpack.core.dataframe.transforms.TimeSyncConfig; -import org.elasticsearch.xpack.dataframe.notifications.DataFrameAuditor; -import org.elasticsearch.xpack.dataframe.persistence.DataFrameTransformsConfigManager; +import org.elasticsearch.xpack.core.transform.transforms.TransformCheckpoint; +import org.elasticsearch.xpack.core.transform.transforms.TransformConfig; +import org.elasticsearch.xpack.core.transform.transforms.TimeSyncConfig; +import org.elasticsearch.xpack.transform.notifications.TransformAuditor; +import org.elasticsearch.xpack.transform.persistence.TransformConfigManager; public class TimeBasedCheckpointProvider extends DefaultCheckpointProvider { @@ -31,15 +31,15 @@ public class TimeBasedCheckpointProvider extends DefaultCheckpointProvider { private final TimeSyncConfig timeSyncConfig; TimeBasedCheckpointProvider(final Client client, - final DataFrameTransformsConfigManager dataFrameTransformsConfigManager, - final DataFrameAuditor dataFrameAuditor, - final DataFrameTransformConfig transformConfig) { - super(client, dataFrameTransformsConfigManager, dataFrameAuditor, transformConfig); + final TransformConfigManager transformConfigManager, + final TransformAuditor transformAuditor, + final TransformConfig transformConfig) { + super(client, transformConfigManager, transformAuditor, transformConfig); timeSyncConfig = (TimeSyncConfig) transformConfig.getSyncConfig(); } @Override - public void sourceHasChanged(DataFrameTransformCheckpoint lastCheckpoint, + public void sourceHasChanged(TransformCheckpoint lastCheckpoint, ActionListener listener) { final long timestamp = getTime(); @@ -64,15 +64,15 @@ public class TimeBasedCheckpointProvider extends DefaultCheckpointProvider { logger.trace("query for changes based on time: {}", sourceBuilder); - ClientHelper.executeWithHeadersAsync(transformConfig.getHeaders(), ClientHelper.DATA_FRAME_ORIGIN, client, SearchAction.INSTANCE, + ClientHelper.executeWithHeadersAsync(transformConfig.getHeaders(), ClientHelper.TRANSFORM_ORIGIN, client, SearchAction.INSTANCE, searchRequest, ActionListener.wrap(r -> { listener.onResponse(r.getHits().getTotalHits().value > 0L); }, listener::onFailure)); } @Override - public void createNextCheckpoint(final DataFrameTransformCheckpoint lastCheckpoint, - final ActionListener listener) { + public void createNextCheckpoint(final TransformCheckpoint lastCheckpoint, + final ActionListener listener) { final 
long timestamp = getTime(); final long checkpoint = lastCheckpoint != null ? lastCheckpoint.getCheckpoint() + 1 : 1; @@ -81,7 +81,7 @@ public class TimeBasedCheckpointProvider extends DefaultCheckpointProvider { getIndexCheckpoints(ActionListener.wrap(checkpointsByIndex -> { listener.onResponse( - new DataFrameTransformCheckpoint(transformConfig.getId(), timestamp, checkpoint, checkpointsByIndex, timeUpperBound)); + new TransformCheckpoint(transformConfig.getId(), timestamp, checkpoint, checkpointsByIndex, timeUpperBound)); }, listener::onFailure)); } diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/checkpoint/TransformCheckpointService.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/checkpoint/TransformCheckpointService.java new file mode 100644 index 00000000000..55464474aa5 --- /dev/null +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/checkpoint/TransformCheckpointService.java @@ -0,0 +1,80 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.transform.checkpoint; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.client.Client; +import org.elasticsearch.xpack.core.transform.transforms.TransformIndexerPosition; +import org.elasticsearch.xpack.core.transform.transforms.TransformCheckpointingInfo; +import org.elasticsearch.xpack.core.transform.transforms.TransformConfig; +import org.elasticsearch.xpack.core.transform.transforms.TransformProgress; +import org.elasticsearch.xpack.core.transform.transforms.TimeSyncConfig; +import org.elasticsearch.xpack.transform.notifications.TransformAuditor; +import org.elasticsearch.xpack.transform.persistence.TransformConfigManager; + +/** + * Transform Checkpoint Service + * + * Allows checkpointing a source of a transform which includes all relevant checkpoints of the source. + * + * This will be used to checkpoint a transform, detect changes, run the transform in continuous mode. 
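TimeBasedCheckpointProvider's change detection above reduces to a bounded search against the source indices: if any document falls inside the new time window, the source has changed. A hedged sketch of that check using the query and search builders visible in this diff; the field name "timestamp" and the size-0 optimization are assumptions for illustration, since the original query construction is in elided context.

import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.index.query.RangeQueryBuilder;
import org.elasticsearch.search.builder.SearchSourceBuilder;

class TimeBasedChangeCheck {
    // Build a search that only asks "is there anything newer than the last checkpoint's upper bound?".
    static SearchRequest changesSince(String[] sourceIndices, long lastUpperBoundMillis, long newUpperBoundMillis) {
        RangeQueryBuilder window = QueryBuilders.rangeQuery("timestamp")   // assumed sync field name
            .gte(lastUpperBoundMillis)
            .lt(newUpperBoundMillis)
            .format("epoch_millis");
        SearchSourceBuilder source = new SearchSourceBuilder()
            .size(0)              // only the hit count matters here
            .query(window);
        return new SearchRequest(sourceIndices).source(source);
    }

    // Mirrors the listener in the diff: any hit in the window means the source has changed.
    static boolean sourceHasChanged(SearchResponse response) {
        return response.getHits().getTotalHits().value > 0L;
    }
}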
+ * + */ +public class TransformCheckpointService { + + private static final Logger logger = LogManager.getLogger(TransformCheckpointService.class); + + private final Client client; + private final TransformConfigManager transformConfigManager; + private final TransformAuditor transformAuditor; + + public TransformCheckpointService(final Client client, + final TransformConfigManager transformConfigManager, TransformAuditor transformAuditor) { + this.client = client; + this.transformConfigManager = transformConfigManager; + this.transformAuditor = transformAuditor; + } + + public CheckpointProvider getCheckpointProvider(final TransformConfig transformConfig) { + if (transformConfig.getSyncConfig() instanceof TimeSyncConfig) { + return new TimeBasedCheckpointProvider(client, transformConfigManager, transformAuditor, transformConfig); + } + + return new DefaultCheckpointProvider(client, transformConfigManager, transformAuditor, transformConfig); + } + + /** + * Get checkpointing stats for a stopped transform + * + * @param transformId The transform id + * @param lastCheckpointNumber the last checkpoint + * @param nextCheckpointPosition position for the next checkpoint + * @param nextCheckpointProgress progress for the next checkpoint + * @param listener listener to retrieve the result + */ + public void getCheckpointingInfo(final String transformId, + final long lastCheckpointNumber, + final TransformIndexerPosition nextCheckpointPosition, + final TransformProgress nextCheckpointProgress, + final ActionListener listener) { + + // we need to retrieve the config first before we can defer the rest to the corresponding provider + transformConfigManager.getTransformConfiguration(transformId, ActionListener.wrap( + transformConfig -> { + getCheckpointProvider(transformConfig).getCheckpointingInfo(lastCheckpointNumber, + nextCheckpointPosition, nextCheckpointProgress, listener); + }, + transformError -> { + logger.warn("Failed to retrieve configuration for transform [" + transformId + "]", transformError); + listener.onFailure(new CheckpointException("Failed to retrieve configuration", transformError)); + }) + ); + } + +} diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/notifications/TransformAuditor.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/notifications/TransformAuditor.java new file mode 100644 index 00000000000..055a3c02f8b --- /dev/null +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/notifications/TransformAuditor.java @@ -0,0 +1,23 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
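getCheckpointProvider above is the single place that decides which checkpointing strategy a transform gets: a time-synced config selects the time-based provider, everything else falls back to the default one. A compact stand-in sketch of that dispatch, using placeholder types rather than the real sync-config and provider classes:

// Placeholder types standing in for SyncConfig / TimeSyncConfig and the two providers.
interface SyncSettings {}
final class TimeSyncSettings implements SyncSettings {}

interface Provider { String name(); }
final class TimeBasedProvider implements Provider { public String name() { return "time-based"; } }
final class DefaultProvider implements Provider { public String name() { return "default"; } }

class ProviderSelection {
    // Same shape as TransformCheckpointService#getCheckpointProvider: an instanceof check picks the strategy.
    static Provider forSyncConfig(SyncSettings syncConfig) {
        if (syncConfig instanceof TimeSyncSettings) {
            return new TimeBasedProvider();
        }
        return new DefaultProvider();
    }
}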
+ */ +package org.elasticsearch.xpack.transform.notifications; + +import org.elasticsearch.client.Client; +import org.elasticsearch.xpack.core.common.notifications.AbstractAuditor; +import org.elasticsearch.xpack.core.transform.notifications.TransformAuditMessage; +import org.elasticsearch.xpack.transform.persistence.TransformInternalIndex; + +import static org.elasticsearch.xpack.core.ClientHelper.TRANSFORM_ORIGIN; + +/** + * TransformAuditor class that abstracts away generic templating for easier injection + */ +public class TransformAuditor extends AbstractAuditor { + + public TransformAuditor(Client client, String nodeName) { + super(client, nodeName, TransformInternalIndex.AUDIT_INDEX, TRANSFORM_ORIGIN, TransformAuditMessage::new); + } +} diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/SeqNoPrimaryTermAndIndex.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/persistence/SeqNoPrimaryTermAndIndex.java similarity index 97% rename from x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/SeqNoPrimaryTermAndIndex.java rename to x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/persistence/SeqNoPrimaryTermAndIndex.java index 68d88c18fe6..fb1f0370428 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/SeqNoPrimaryTermAndIndex.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/persistence/SeqNoPrimaryTermAndIndex.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.dataframe.persistence; +package org.elasticsearch.xpack.transform.persistence; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.search.SearchHit; diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameTransformsConfigManager.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/persistence/TransformConfigManager.java similarity index 74% rename from x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameTransformsConfigManager.java rename to x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/persistence/TransformConfigManager.java index b58b96a8d19..4bb4cbadd6a 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameTransformsConfigManager.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/persistence/TransformConfigManager.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.dataframe.persistence; +package org.elasticsearch.xpack.transform.persistence; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -48,11 +48,11 @@ import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.xpack.core.action.util.ExpandedIdsMatcher; import org.elasticsearch.xpack.core.action.util.PageParams; -import org.elasticsearch.xpack.core.dataframe.DataFrameField; -import org.elasticsearch.xpack.core.dataframe.DataFrameMessages; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpoint; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformStoredDoc; +import org.elasticsearch.xpack.core.transform.TransformField; +import org.elasticsearch.xpack.core.transform.TransformMessages; +import org.elasticsearch.xpack.core.transform.transforms.TransformCheckpoint; +import org.elasticsearch.xpack.core.transform.transforms.TransformConfig; +import org.elasticsearch.xpack.core.transform.transforms.TransformStoredDoc; import java.io.IOException; import java.io.InputStream; @@ -64,11 +64,11 @@ import java.util.List; import java.util.Map; import java.util.Set; -import static org.elasticsearch.xpack.core.ClientHelper.DATA_FRAME_ORIGIN; +import static org.elasticsearch.xpack.core.ClientHelper.TRANSFORM_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; /** - * Place of all interactions with the internal transforms index. For configuration and mappings see @link{DataFrameInternalIndex} + * Place of all interactions with the internal transforms index. For configuration and mappings see @link{TransformInternalIndex} * * Versioned Index: * @@ -87,16 +87,16 @@ import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; * * TODO: Provide a method that moves old docs into the current index and delete old indexes and templates */ -public class DataFrameTransformsConfigManager { +public class TransformConfigManager { - private static final Logger logger = LogManager.getLogger(DataFrameTransformsConfigManager.class); + private static final Logger logger = LogManager.getLogger(TransformConfigManager.class); - public static final Map TO_XCONTENT_PARAMS = Collections.singletonMap(DataFrameField.FOR_INTERNAL_STORAGE, "true"); + public static final Map TO_XCONTENT_PARAMS = Collections.singletonMap(TransformField.FOR_INTERNAL_STORAGE, "true"); private final Client client; private final NamedXContentRegistry xContentRegistry; - public DataFrameTransformsConfigManager(Client client, NamedXContentRegistry xContentRegistry) { + public TransformConfigManager(Client client, NamedXContentRegistry xContentRegistry) { this.client = client; this.xContentRegistry = xContentRegistry; } @@ -104,20 +104,20 @@ public class DataFrameTransformsConfigManager { /** * Persist a checkpoint in the internal index * - * @param checkpoint the @link{DataFrameTransformCheckpoint} + * @param checkpoint the @link{TransformCheckpoint} * @param listener listener to call after request has been made */ - public void putTransformCheckpoint(DataFrameTransformCheckpoint checkpoint, ActionListener listener) { + public void putTransformCheckpoint(TransformCheckpoint checkpoint, ActionListener listener) { try (XContentBuilder builder = XContentFactory.jsonBuilder()) { XContentBuilder source = checkpoint.toXContent(builder, new 
ToXContent.MapParams(TO_XCONTENT_PARAMS)); - IndexRequest indexRequest = new IndexRequest(DataFrameInternalIndex.LATEST_INDEX_NAME) + IndexRequest indexRequest = new IndexRequest(TransformInternalIndex.LATEST_INDEX_NAME) .opType(DocWriteRequest.OpType.INDEX) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) - .id(DataFrameTransformCheckpoint.documentId(checkpoint.getTransformId(), checkpoint.getCheckpoint())) + .id(TransformCheckpoint.documentId(checkpoint.getTransformId(), checkpoint.getCheckpoint())) .source(source); - executeAsyncWithOrigin(client, DATA_FRAME_ORIGIN, IndexAction.INSTANCE, indexRequest, ActionListener.wrap(r -> { + executeAsyncWithOrigin(client, TRANSFORM_ORIGIN, IndexAction.INSTANCE, indexRequest, ActionListener.wrap(r -> { listener.onResponse(true); }, listener::onFailure)); } catch (IOException e) { @@ -129,28 +129,28 @@ public class DataFrameTransformsConfigManager { /** * Store the transform configuration in the internal index * - * @param transformConfig the @link{DataFrameTransformConfig} + * @param transformConfig the @link{TransformConfig} * @param listener listener to call after request */ - public void putTransformConfiguration(DataFrameTransformConfig transformConfig, ActionListener listener) { + public void putTransformConfiguration(TransformConfig transformConfig, ActionListener listener) { putTransformConfiguration(transformConfig, DocWriteRequest.OpType.CREATE, null, listener); } /** * Update the transform configuration in the internal index. * - * Essentially the same as {@link DataFrameTransformsConfigManager#putTransformConfiguration(DataFrameTransformConfig, ActionListener)} + * Essentially the same as {@link TransformConfigManager#putTransformConfiguration(TransformConfig, ActionListener)} * but is an index operation that will fail with a version conflict * if the current document seqNo and primaryTerm is not the same as the provided version. - * @param transformConfig the @link{DataFrameTransformConfig} + * @param transformConfig the @link{TransformConfig} * @param seqNoPrimaryTermAndIndex an object containing the believed seqNo, primaryTerm and index for the doc. 
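The update path described here relies on optimistic concurrency control: the index request carries the seqNo and primaryTerm the caller last saw, and the engine rejects it with a version conflict if the stored document has moved on in the meantime. A hedged sketch of building such a request from the client classes that appear in this diff; the index name, document id and source map parameters are made up for illustration.

import java.util.Map;

import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.support.WriteRequest;

class OptimisticUpdateSketch {
    // Build an index request that only succeeds if the doc still has the expected seqNo/primaryTerm.
    static IndexRequest guardedUpdate(String index, String docId, Map<String, Object> source,
                                      long expectedSeqNo, long expectedPrimaryTerm) {
        return new IndexRequest(index)
            .opType(DocWriteRequest.OpType.INDEX)                    // plain index op, not create
            .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)  // make the update visible right away
            .id(docId)
            .source(source)
            .setIfSeqNo(expectedSeqNo)              // reject with a version conflict if either of these
            .setIfPrimaryTerm(expectedPrimaryTerm); // no longer matches the stored document
    }
}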
* Used for optimistic concurrency control * @param listener listener to call after request */ - public void updateTransformConfiguration(DataFrameTransformConfig transformConfig, + public void updateTransformConfiguration(TransformConfig transformConfig, SeqNoPrimaryTermAndIndex seqNoPrimaryTermAndIndex, ActionListener listener) { - if (seqNoPrimaryTermAndIndex.getIndex().equals(DataFrameInternalIndex.LATEST_INDEX_NAME)) { + if (seqNoPrimaryTermAndIndex.getIndex().equals(TransformInternalIndex.LATEST_INDEX_NAME)) { // update the config in the same, current index using optimistic concurrency control putTransformConfiguration(transformConfig, DocWriteRequest.OpType.INDEX, seqNoPrimaryTermAndIndex, listener); } else { @@ -167,13 +167,13 @@ public class DataFrameTransformsConfigManager { * @param listener listener to alert on completion */ public void deleteOldTransformConfigurations(String transformId, ActionListener listener) { - DeleteByQueryRequest deleteByQueryRequest = new DeleteByQueryRequest(DataFrameInternalIndex.INDEX_NAME_PATTERN) + DeleteByQueryRequest deleteByQueryRequest = new DeleteByQueryRequest(TransformInternalIndex.INDEX_NAME_PATTERN) .setQuery(QueryBuilders.constantScoreQuery(QueryBuilders.boolQuery() - .mustNot(QueryBuilders.termQuery("_index", DataFrameInternalIndex.LATEST_INDEX_NAME)) - .filter(QueryBuilders.termQuery("_id", DataFrameTransformConfig.documentId(transformId))))) + .mustNot(QueryBuilders.termQuery("_index", TransformInternalIndex.LATEST_INDEX_NAME)) + .filter(QueryBuilders.termQuery("_id", TransformConfig.documentId(transformId))))) .setIndicesOptions(IndicesOptions.lenientExpandOpen()); - executeAsyncWithOrigin(client, DATA_FRAME_ORIGIN, DeleteByQueryAction.INSTANCE, deleteByQueryRequest, ActionListener.wrap( + executeAsyncWithOrigin(client, TRANSFORM_ORIGIN, DeleteByQueryAction.INSTANCE, deleteByQueryRequest, ActionListener.wrap( response -> { if ((response.getBulkFailures().isEmpty() && response.getSearchFailures().isEmpty()) == false) { Tuple statusAndReason = getStatusAndReason(response); @@ -194,13 +194,13 @@ public class DataFrameTransformsConfigManager { * @param listener listener to alert on completion */ public void deleteOldTransformStoredDocuments(String transformId, ActionListener listener) { - DeleteByQueryRequest deleteByQueryRequest = new DeleteByQueryRequest(DataFrameInternalIndex.INDEX_NAME_PATTERN) + DeleteByQueryRequest deleteByQueryRequest = new DeleteByQueryRequest(TransformInternalIndex.INDEX_NAME_PATTERN) .setQuery(QueryBuilders.constantScoreQuery(QueryBuilders.boolQuery() - .mustNot(QueryBuilders.termQuery("_index", DataFrameInternalIndex.LATEST_INDEX_NAME)) - .filter(QueryBuilders.termQuery("_id", DataFrameTransformStoredDoc.documentId(transformId))))) + .mustNot(QueryBuilders.termQuery("_index", TransformInternalIndex.LATEST_INDEX_NAME)) + .filter(QueryBuilders.termQuery("_id", TransformStoredDoc.documentId(transformId))))) .setIndicesOptions(IndicesOptions.lenientExpandOpen()); - executeAsyncWithOrigin(client, DATA_FRAME_ORIGIN, DeleteByQueryAction.INSTANCE, deleteByQueryRequest, ActionListener.wrap( + executeAsyncWithOrigin(client, TRANSFORM_ORIGIN, DeleteByQueryAction.INSTANCE, deleteByQueryRequest, ActionListener.wrap( response -> { if ((response.getBulkFailures().isEmpty() && response.getSearchFailures().isEmpty()) == false) { Tuple statusAndReason = getStatusAndReason(response); @@ -214,39 +214,39 @@ public class DataFrameTransformsConfigManager { )); } - private void putTransformConfiguration(DataFrameTransformConfig 
transformConfig, + private void putTransformConfiguration(TransformConfig transformConfig, DocWriteRequest.OpType optType, SeqNoPrimaryTermAndIndex seqNoPrimaryTermAndIndex, ActionListener listener) { try (XContentBuilder builder = XContentFactory.jsonBuilder()) { XContentBuilder source = transformConfig.toXContent(builder, new ToXContent.MapParams(TO_XCONTENT_PARAMS)); - IndexRequest indexRequest = new IndexRequest(DataFrameInternalIndex.LATEST_INDEX_NAME) + IndexRequest indexRequest = new IndexRequest(TransformInternalIndex.LATEST_INDEX_NAME) .opType(optType) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) - .id(DataFrameTransformConfig.documentId(transformConfig.getId())) + .id(TransformConfig.documentId(transformConfig.getId())) .source(source); if (seqNoPrimaryTermAndIndex != null) { indexRequest.setIfSeqNo(seqNoPrimaryTermAndIndex.getSeqNo()) .setIfPrimaryTerm(seqNoPrimaryTermAndIndex.getPrimaryTerm()); } - executeAsyncWithOrigin(client, DATA_FRAME_ORIGIN, IndexAction.INSTANCE, indexRequest, ActionListener.wrap(r -> { + executeAsyncWithOrigin(client, TRANSFORM_ORIGIN, IndexAction.INSTANCE, indexRequest, ActionListener.wrap(r -> { listener.onResponse(true); }, e -> { if (e instanceof VersionConflictEngineException) { // the transform already exists listener.onFailure(new ResourceAlreadyExistsException( - DataFrameMessages.getMessage(DataFrameMessages.REST_PUT_DATA_FRAME_TRANSFORM_EXISTS, + TransformMessages.getMessage(TransformMessages.REST_PUT_TRANSFORM_EXISTS, transformConfig.getId()))); } else { listener.onFailure( - new RuntimeException(DataFrameMessages.REST_PUT_DATA_FRAME_FAILED_PERSIST_TRANSFORM_CONFIGURATION, e)); + new RuntimeException(TransformMessages.REST_PUT_FAILED_PERSIST_TRANSFORM_CONFIGURATION, e)); } })); } catch (IOException e) { // not expected to happen but for the sake of completeness listener.onFailure(new ElasticsearchParseException( - DataFrameMessages.getMessage(DataFrameMessages.REST_DATA_FRAME_FAILED_TO_SERIALIZE_TRANSFORM, transformConfig.getId()), + TransformMessages.getMessage(TransformMessages.REST_FAILED_TO_SERIALIZE_TRANSFORM, transformConfig.getId()), e)); } } @@ -258,21 +258,21 @@ public class DataFrameTransformsConfigManager { * @param checkpoint the checkpoint * @param resultListener listener to call after request has been made */ - public void getTransformCheckpoint(String transformId, long checkpoint, ActionListener resultListener) { - QueryBuilder queryBuilder = QueryBuilders.termQuery("_id", DataFrameTransformCheckpoint.documentId(transformId, checkpoint)); - SearchRequest searchRequest = client.prepareSearch(DataFrameInternalIndex.INDEX_NAME_PATTERN) + public void getTransformCheckpoint(String transformId, long checkpoint, ActionListener resultListener) { + QueryBuilder queryBuilder = QueryBuilders.termQuery("_id", TransformCheckpoint.documentId(transformId, checkpoint)); + SearchRequest searchRequest = client.prepareSearch(TransformInternalIndex.INDEX_NAME_PATTERN) .setQuery(queryBuilder) // use sort to get the last .addSort("_index", SortOrder.DESC) .setSize(1) .request(); - executeAsyncWithOrigin(client, DATA_FRAME_ORIGIN, SearchAction.INSTANCE, searchRequest, ActionListener.wrap( + executeAsyncWithOrigin(client, TRANSFORM_ORIGIN, SearchAction.INSTANCE, searchRequest, ActionListener.wrap( searchResponse -> { if (searchResponse.getHits().getHits().length == 0) { // do not fail if checkpoint does not exist but return an empty checkpoint logger.trace("found no checkpoint for transform [" + transformId + "], returning empty 
checkpoint"); - resultListener.onResponse(DataFrameTransformCheckpoint.EMPTY); + resultListener.onResponse(TransformCheckpoint.EMPTY); return; } BytesReference source = searchResponse.getHits().getHits()[0].getSourceRef(); @@ -282,26 +282,26 @@ public class DataFrameTransformsConfigManager { /** * Get the transform configuration for a given transform id. This function is only for internal use. For transforms returned via GET - * data_frame/transforms, see the TransportGetDataFrameTransformsAction + * data_frame/transforms, see the @link{TransportGetTransformAction} * * @param transformId the transform id * @param resultListener listener to call after inner request has returned */ - public void getTransformConfiguration(String transformId, ActionListener resultListener) { - QueryBuilder queryBuilder = QueryBuilders.termQuery("_id", DataFrameTransformConfig.documentId(transformId)); - SearchRequest searchRequest = client.prepareSearch(DataFrameInternalIndex.INDEX_NAME_PATTERN) + public void getTransformConfiguration(String transformId, ActionListener resultListener) { + QueryBuilder queryBuilder = QueryBuilders.termQuery("_id", TransformConfig.documentId(transformId)); + SearchRequest searchRequest = client.prepareSearch(TransformInternalIndex.INDEX_NAME_PATTERN) .setQuery(queryBuilder) // use sort to get the last .addSort("_index", SortOrder.DESC) .setSize(1) .request(); - executeAsyncWithOrigin(client, DATA_FRAME_ORIGIN, SearchAction.INSTANCE, searchRequest, + executeAsyncWithOrigin(client, TRANSFORM_ORIGIN, SearchAction.INSTANCE, searchRequest, ActionListener.wrap( searchResponse -> { if (searchResponse.getHits().getHits().length == 0) { resultListener.onFailure(new ResourceNotFoundException( - DataFrameMessages.getMessage(DataFrameMessages.REST_DATA_FRAME_UNKNOWN_TRANSFORM, transformId))); + TransformMessages.getMessage(TransformMessages.REST_UNKNOWN_TRANSFORM, transformId))); return; } BytesReference source = searchResponse.getHits().getHits()[0].getSourceRef(); @@ -311,16 +311,16 @@ public class DataFrameTransformsConfigManager { /** * Get the transform configuration for a given transform id. This function is only for internal use. 
For transforms returned via GET - * data_frame/transforms, see the TransportGetDataFrameTransformsAction + * data_frame/transforms, see the @link{TransportGetTransformAction} * * @param transformId the transform id * @param configAndVersionListener listener to call after inner request has returned */ public void getTransformConfigurationForUpdate(String transformId, - ActionListener> configAndVersionListener) { - QueryBuilder queryBuilder = QueryBuilders.termQuery("_id", DataFrameTransformConfig.documentId(transformId)); - SearchRequest searchRequest = client.prepareSearch(DataFrameInternalIndex.INDEX_NAME_PATTERN) + QueryBuilder queryBuilder = QueryBuilders.termQuery("_id", TransformConfig.documentId(transformId)); + SearchRequest searchRequest = client.prepareSearch(TransformInternalIndex.INDEX_NAME_PATTERN) .setQuery(queryBuilder) // use sort to get the last .addSort("_index", SortOrder.DESC) @@ -328,11 +328,11 @@ public class DataFrameTransformsConfigManager { .seqNoAndPrimaryTerm(true) .request(); - executeAsyncWithOrigin(client, DATA_FRAME_ORIGIN, SearchAction.INSTANCE, searchRequest, ActionListener.wrap( + executeAsyncWithOrigin(client, TRANSFORM_ORIGIN, SearchAction.INSTANCE, searchRequest, ActionListener.wrap( searchResponse -> { if (searchResponse.getHits().getHits().length == 0) { configAndVersionListener.onFailure(new ResourceNotFoundException( - DataFrameMessages.getMessage(DataFrameMessages.REST_DATA_FRAME_UNKNOWN_TRANSFORM, transformId))); + TransformMessages.getMessage(TransformMessages.REST_UNKNOWN_TRANSFORM, transformId))); return; } SearchHit hit = searchResponse.getHits().getHits()[0]; @@ -359,21 +359,21 @@ public class DataFrameTransformsConfigManager { boolean allowNoMatch, ActionListener>> foundIdsListener) { String[] idTokens = ExpandedIdsMatcher.tokenizeExpression(transformIdsExpression); - QueryBuilder queryBuilder = buildQueryFromTokenizedIds(idTokens, DataFrameTransformConfig.NAME); + QueryBuilder queryBuilder = buildQueryFromTokenizedIds(idTokens, TransformConfig.NAME); - SearchRequest request = client.prepareSearch(DataFrameInternalIndex.INDEX_NAME_PATTERN) - .addSort(DataFrameField.ID.getPreferredName(), SortOrder.ASC) + SearchRequest request = client.prepareSearch(TransformInternalIndex.INDEX_NAME_PATTERN) + .addSort(TransformField.ID.getPreferredName(), SortOrder.ASC) .setFrom(pageParams.getFrom()) .setTrackTotalHits(true) .setSize(pageParams.getSize()) .setQuery(queryBuilder) // We only care about the `id` field, small optimization - .setFetchSource(DataFrameField.ID.getPreferredName(), "") + .setFetchSource(TransformField.ID.getPreferredName(), "") .request(); final ExpandedIdsMatcher requiredMatches = new ExpandedIdsMatcher(idTokens, allowNoMatch); - executeAsyncWithOrigin(client.threadPool().getThreadContext(), DATA_FRAME_ORIGIN, request, ActionListener.wrap( + executeAsyncWithOrigin(client.threadPool().getThreadContext(), TRANSFORM_ORIGIN, request, ActionListener.wrap( searchResponse -> { long totalHits = searchResponse.getHits().getTotalHits().value; // important: preserve order @@ -383,7 +383,7 @@ public class DataFrameTransformsConfigManager { try (InputStream stream = source.streamInput(); XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream)) { - ids.add((String) parser.map().get(DataFrameField.ID.getPreferredName())); + ids.add((String) parser.map().get(TransformField.ID.getPreferredName())); } catch (IOException e) { foundIdsListener.onFailure(new 
ElasticsearchParseException("failed to parse search hit for ids", e)); return; @@ -394,7 +394,7 @@ public class DataFrameTransformsConfigManager { // some required Ids were not found foundIdsListener.onFailure( new ResourceNotFoundException( - DataFrameMessages.getMessage(DataFrameMessages.REST_DATA_FRAME_UNKNOWN_TRANSFORM, + TransformMessages.getMessage(TransformMessages.REST_UNKNOWN_TRANSFORM, requiredMatches.unmatchedIdsString()))); return; } @@ -412,40 +412,40 @@ public class DataFrameTransformsConfigManager { DeleteByQueryRequest request = new DeleteByQueryRequest() .setAbortOnVersionConflict(false); //since these documents are not updated, a conflict just means it was deleted previously - request.indices(DataFrameInternalIndex.INDEX_NAME_PATTERN); - QueryBuilder query = QueryBuilders.termQuery(DataFrameField.ID.getPreferredName(), transformId); + request.indices(TransformInternalIndex.INDEX_NAME_PATTERN); + QueryBuilder query = QueryBuilders.termQuery(TransformField.ID.getPreferredName(), transformId); request.setQuery(query); request.setRefresh(true); - executeAsyncWithOrigin(client, DATA_FRAME_ORIGIN, DeleteByQueryAction.INSTANCE, request, ActionListener.wrap(deleteResponse -> { + executeAsyncWithOrigin(client, TRANSFORM_ORIGIN, DeleteByQueryAction.INSTANCE, request, ActionListener.wrap(deleteResponse -> { if (deleteResponse.getDeleted() == 0) { listener.onFailure(new ResourceNotFoundException( - DataFrameMessages.getMessage(DataFrameMessages.REST_DATA_FRAME_UNKNOWN_TRANSFORM, transformId))); + TransformMessages.getMessage(TransformMessages.REST_UNKNOWN_TRANSFORM, transformId))); return; } listener.onResponse(true); }, e -> { if (e.getClass() == IndexNotFoundException.class) { listener.onFailure(new ResourceNotFoundException( - DataFrameMessages.getMessage(DataFrameMessages.REST_DATA_FRAME_UNKNOWN_TRANSFORM, transformId))); + TransformMessages.getMessage(TransformMessages.REST_UNKNOWN_TRANSFORM, transformId))); } else { listener.onFailure(e); } })); } - public void putOrUpdateTransformStoredDoc(DataFrameTransformStoredDoc stats, + public void putOrUpdateTransformStoredDoc(TransformStoredDoc stats, SeqNoPrimaryTermAndIndex seqNoPrimaryTermAndIndex, ActionListener listener) { try (XContentBuilder builder = XContentFactory.jsonBuilder()) { XContentBuilder source = stats.toXContent(builder, new ToXContent.MapParams(TO_XCONTENT_PARAMS)); - IndexRequest indexRequest = new IndexRequest(DataFrameInternalIndex.LATEST_INDEX_NAME) + IndexRequest indexRequest = new IndexRequest(TransformInternalIndex.LATEST_INDEX_NAME) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) - .id(DataFrameTransformStoredDoc.documentId(stats.getId())) + .id(TransformStoredDoc.documentId(stats.getId())) .source(source); if (seqNoPrimaryTermAndIndex != null && - seqNoPrimaryTermAndIndex.getIndex().equals(DataFrameInternalIndex.LATEST_INDEX_NAME)) { + seqNoPrimaryTermAndIndex.getIndex().equals(TransformInternalIndex.LATEST_INDEX_NAME)) { indexRequest.opType(DocWriteRequest.OpType.INDEX) .setIfSeqNo(seqNoPrimaryTermAndIndex.getSeqNo()) .setIfPrimaryTerm(seqNoPrimaryTermAndIndex.getPrimaryTerm()); @@ -454,24 +454,24 @@ public class DataFrameTransformsConfigManager { // so, it should be a create option without the seqNo and primaryTerm set indexRequest.opType(DocWriteRequest.OpType.CREATE); } - executeAsyncWithOrigin(client, DATA_FRAME_ORIGIN, IndexAction.INSTANCE, indexRequest, ActionListener.wrap( + executeAsyncWithOrigin(client, TRANSFORM_ORIGIN, IndexAction.INSTANCE, indexRequest, ActionListener.wrap( r -> 
listener.onResponse(SeqNoPrimaryTermAndIndex.fromIndexResponse(r)), e -> listener.onFailure(new RuntimeException( - DataFrameMessages.getMessage(DataFrameMessages.DATA_FRAME_FAILED_TO_PERSIST_STATS, stats.getId()), + TransformMessages.getMessage(TransformMessages.TRANSFORM_FAILED_TO_PERSIST_STATS, stats.getId()), e)) )); } catch (IOException e) { // not expected to happen but for the sake of completeness listener.onFailure(new ElasticsearchParseException( - DataFrameMessages.getMessage(DataFrameMessages.DATA_FRAME_FAILED_TO_PERSIST_STATS, stats.getId()), + TransformMessages.getMessage(TransformMessages.TRANSFORM_FAILED_TO_PERSIST_STATS, stats.getId()), e)); } } public void getTransformStoredDoc(String transformId, - ActionListener> resultListener) { - QueryBuilder queryBuilder = QueryBuilders.termQuery("_id", DataFrameTransformStoredDoc.documentId(transformId)); - SearchRequest searchRequest = client.prepareSearch(DataFrameInternalIndex.INDEX_NAME_PATTERN) + ActionListener> resultListener) { + QueryBuilder queryBuilder = QueryBuilders.termQuery("_id", TransformStoredDoc.documentId(transformId)); + SearchRequest searchRequest = client.prepareSearch(TransformInternalIndex.INDEX_NAME_PATTERN) .setQuery(queryBuilder) // use sort to get the last .addSort("_index", SortOrder.DESC) @@ -479,11 +479,11 @@ public class DataFrameTransformsConfigManager { .seqNoAndPrimaryTerm(true) .request(); - executeAsyncWithOrigin(client, DATA_FRAME_ORIGIN, SearchAction.INSTANCE, searchRequest, ActionListener.wrap( + executeAsyncWithOrigin(client, TRANSFORM_ORIGIN, SearchAction.INSTANCE, searchRequest, ActionListener.wrap( searchResponse -> { if (searchResponse.getHits().getHits().length == 0) { resultListener.onFailure(new ResourceNotFoundException( - DataFrameMessages.getMessage(DataFrameMessages.DATA_FRAME_UNKNOWN_TRANSFORM_STATS, transformId))); + TransformMessages.getMessage(TransformMessages.UNKNOWN_TRANSFORM_STATS, transformId))); return; } SearchHit searchHit = searchResponse.getHits().getHits()[0]; @@ -492,33 +492,33 @@ public class DataFrameTransformsConfigManager { XContentParser parser = XContentFactory.xContent(XContentType.JSON) .createParser(xContentRegistry, LoggingDeprecationHandler.INSTANCE, stream)) { resultListener.onResponse( - Tuple.tuple(DataFrameTransformStoredDoc.fromXContent(parser), + Tuple.tuple(TransformStoredDoc.fromXContent(parser), SeqNoPrimaryTermAndIndex.fromSearchHit(searchHit))); } catch (Exception e) { - logger.error(DataFrameMessages.getMessage(DataFrameMessages.FAILED_TO_PARSE_TRANSFORM_STATISTICS_CONFIGURATION, + logger.error(TransformMessages.getMessage(TransformMessages.FAILED_TO_PARSE_TRANSFORM_STATISTICS_CONFIGURATION, transformId), e); resultListener.onFailure(e); } }, resultListener::onFailure)); } - public void getTransformStoredDoc(Collection transformIds, ActionListener> listener) { + public void getTransformStoredDoc(Collection transformIds, ActionListener> listener) { QueryBuilder builder = QueryBuilders.constantScoreQuery(QueryBuilders.boolQuery() - .filter(QueryBuilders.termsQuery(DataFrameField.ID.getPreferredName(), transformIds)) - .filter(QueryBuilders.termQuery(DataFrameField.INDEX_DOC_TYPE.getPreferredName(), DataFrameTransformStoredDoc.NAME))); + .filter(QueryBuilders.termsQuery(TransformField.ID.getPreferredName(), transformIds)) + .filter(QueryBuilders.termQuery(TransformField.INDEX_DOC_TYPE.getPreferredName(), TransformStoredDoc.NAME))); - SearchRequest searchRequest = client.prepareSearch(DataFrameInternalIndex.INDEX_NAME_PATTERN) - 
.addSort(DataFrameField.ID.getPreferredName(), SortOrder.ASC) + SearchRequest searchRequest = client.prepareSearch(TransformInternalIndex.INDEX_NAME_PATTERN) + .addSort(TransformField.ID.getPreferredName(), SortOrder.ASC) .addSort("_index", SortOrder.DESC) .setQuery(builder) // the limit for getting stats and transforms is 1000, as long as we do not have 10 indices this works .setSize(Math.min(transformIds.size(), 10_000)) .request(); - executeAsyncWithOrigin(client.threadPool().getThreadContext(), DATA_FRAME_ORIGIN, searchRequest, + executeAsyncWithOrigin(client.threadPool().getThreadContext(), TRANSFORM_ORIGIN, searchRequest, ActionListener.wrap( searchResponse -> { - List stats = new ArrayList<>(); + List stats = new ArrayList<>(); String previousId = null; for (SearchHit hit : searchResponse.getHits().getHits()) { // skip old versions @@ -528,10 +528,10 @@ public class DataFrameTransformsConfigManager { try (InputStream stream = source.streamInput(); XContentParser parser = XContentFactory.xContent(XContentType.JSON) .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream)) { - stats.add(DataFrameTransformStoredDoc.fromXContent(parser)); + stats.add(TransformStoredDoc.fromXContent(parser)); } catch (IOException e) { listener.onFailure( - new ElasticsearchParseException("failed to parse data frame stats from search hit", e)); + new ElasticsearchParseException("failed to parse transform stats from search hit", e)); return; } } @@ -543,44 +543,44 @@ public class DataFrameTransformsConfigManager { } private void parseTransformLenientlyFromSource(BytesReference source, String transformId, - ActionListener transformListener) { + ActionListener transformListener) { try (InputStream stream = source.streamInput(); XContentParser parser = XContentFactory.xContent(XContentType.JSON) .createParser(xContentRegistry, LoggingDeprecationHandler.INSTANCE, stream)) { - transformListener.onResponse(DataFrameTransformConfig.fromXContent(parser, transformId, true)); + transformListener.onResponse(TransformConfig.fromXContent(parser, transformId, true)); } catch (Exception e) { - logger.error(DataFrameMessages.getMessage(DataFrameMessages.FAILED_TO_PARSE_TRANSFORM_CONFIGURATION, transformId), e); + logger.error(TransformMessages.getMessage(TransformMessages.FAILED_TO_PARSE_TRANSFORM_CONFIGURATION, transformId), e); transformListener.onFailure(e); } } private void parseCheckpointsLenientlyFromSource(BytesReference source, String transformId, - ActionListener transformListener) { + ActionListener transformListener) { try (InputStream stream = source.streamInput(); XContentParser parser = XContentFactory.xContent(XContentType.JSON) .createParser(xContentRegistry, LoggingDeprecationHandler.INSTANCE, stream)) { - transformListener.onResponse(DataFrameTransformCheckpoint.fromXContent(parser, true)); + transformListener.onResponse(TransformCheckpoint.fromXContent(parser, true)); } catch (Exception e) { - logger.error(DataFrameMessages.getMessage(DataFrameMessages.FAILED_TO_PARSE_TRANSFORM_CHECKPOINTS, transformId), e); + logger.error(TransformMessages.getMessage(TransformMessages.FAILED_TO_PARSE_TRANSFORM_CHECKPOINTS, transformId), e); transformListener.onFailure(e); } } private QueryBuilder buildQueryFromTokenizedIds(String[] idTokens, String resourceName) { BoolQueryBuilder queryBuilder = QueryBuilders.boolQuery() - .filter(QueryBuilders.termQuery(DataFrameField.INDEX_DOC_TYPE.getPreferredName(), resourceName)); + 
.filter(QueryBuilders.termQuery(TransformField.INDEX_DOC_TYPE.getPreferredName(), resourceName)); if (Strings.isAllOrWildcard(idTokens) == false) { List terms = new ArrayList<>(); BoolQueryBuilder shouldQueries = new BoolQueryBuilder(); for (String token : idTokens) { if (Regex.isSimpleMatchPattern(token)) { - shouldQueries.should(QueryBuilders.wildcardQuery(DataFrameField.ID.getPreferredName(), token)); + shouldQueries.should(QueryBuilders.wildcardQuery(TransformField.ID.getPreferredName(), token)); } else { terms.add(token); } } if (terms.isEmpty() == false) { - shouldQueries.should(QueryBuilders.termsQuery(DataFrameField.ID.getPreferredName(), terms)); + shouldQueries.should(QueryBuilders.termsQuery(TransformField.ID.getPreferredName(), terms)); } if (shouldQueries.should().isEmpty() == false) { diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataframeIndex.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/persistence/TransformIndex.java similarity index 79% rename from x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataframeIndex.java rename to x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/persistence/TransformIndex.java index 347e0b91428..28ee24ba5a0 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataframeIndex.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/persistence/TransformIndex.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.dataframe.persistence; +package org.elasticsearch.xpack.transform.persistence; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -16,9 +16,9 @@ import org.elasticsearch.client.Client; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.xpack.core.dataframe.DataFrameField; -import org.elasticsearch.xpack.core.dataframe.DataFrameMessages; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; +import org.elasticsearch.xpack.core.transform.TransformField; +import org.elasticsearch.xpack.core.transform.TransformMessages; +import org.elasticsearch.xpack.core.transform.transforms.TransformConfig; import java.io.IOException; import java.time.Clock; @@ -27,20 +27,20 @@ import java.util.Map.Entry; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -public final class DataframeIndex { - private static final Logger logger = LogManager.getLogger(DataframeIndex.class); +public final class TransformIndex { + private static final Logger logger = LogManager.getLogger(TransformIndex.class); public static final String DOC_TYPE = "_doc"; private static final String PROPERTIES = "properties"; private static final String TYPE = "type"; private static final String META = "_meta"; - private DataframeIndex() { + private TransformIndex() { } public static void createDestinationIndex(Client client, Clock clock, - DataFrameTransformConfig transformConfig, + TransformConfig transformConfig, Map mappings, ActionListener listener) { CreateIndexRequest request = new CreateIndexRequest(transformConfig.getDestination().getIndex()); @@ -57,7 +57,7 @@ public final class DataframeIndex { client.execute(CreateIndexAction.INSTANCE, request, 
ActionListener.wrap(createIndexResponse -> { listener.onResponse(true); }, e -> { - String message = DataFrameMessages.getMessage(DataFrameMessages.FAILED_TO_CREATE_DESTINATION_INDEX, + String message = TransformMessages.getMessage(TransformMessages.FAILED_TO_CREATE_DESTINATION_INDEX, transformConfig.getDestination().getIndex(), transformConfig.getId()); logger.error(message); listener.onFailure(new RuntimeException(message, e)); @@ -97,13 +97,13 @@ public final class DataframeIndex { private static XContentBuilder addMetaData(XContentBuilder builder, String id, Clock clock) throws IOException { return builder.startObject(META) - .field(DataFrameField.CREATED_BY, DataFrameField.DATA_FRAME_SIGNATURE) - .startObject(DataFrameField.META_FIELDNAME) - .field(DataFrameField.CREATION_DATE_MILLIS, clock.millis()) - .startObject(DataFrameField.VERSION.getPreferredName()) - .field(DataFrameField.CREATED, Version.CURRENT) + .field(TransformField.CREATED_BY, TransformField.TRANSFORM_SIGNATURE) + .startObject(TransformField.META_FIELDNAME) + .field(TransformField.CREATION_DATE_MILLIS, clock.millis()) + .startObject(TransformField.VERSION.getPreferredName()) + .field(TransformField.CREATED, Version.CURRENT) .endObject() - .field(DataFrameField.TRANSFORM, id) + .field(TransformField.TRANSFORM, id) .endObject() // META_FIELDNAME .endObject(); // META } diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameInternalIndex.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/persistence/TransformInternalIndex.java similarity index 57% rename from x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameInternalIndex.java rename to x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/persistence/TransformInternalIndex.java index f1f5dd85b93..9a2f50b1679 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameInternalIndex.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/persistence/TransformInternalIndex.java @@ -4,32 +4,44 @@ * you may not use this file except in compliance with the Elastic License. 
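Editorial aside: the createDestinationIndex/addMetaData pair above is the whole public surface of TransformIndex after the rename. A minimal caller sketch follows; the generic parameters (Map<String, String> mappings, ActionListener<Boolean> listener) are assumptions inferred from listener.onResponse(true) in the hunk, and the wrapper class and logger are illustrative only, not code from this PR.

import java.time.Clock;
import java.util.Map;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.client.Client;
import org.elasticsearch.xpack.core.transform.transforms.TransformConfig;
import org.elasticsearch.xpack.transform.persistence.TransformIndex;

// Illustrative caller: create the destination index (including the _meta block written
// by addMetaData) before the first indexing run of a transform.
class TransformIndexUsageSketch {
    private static final Logger logger = LogManager.getLogger(TransformIndexUsageSketch.class);

    static void ensureDestinationIndex(Client client, TransformConfig config, Map<String, String> destMappings) {
        TransformIndex.createDestinationIndex(client, Clock.systemUTC(), config, destMappings,
            ActionListener.wrap(
                created -> logger.debug("[{}] created destination index: {}", config.getId(), created),
                e -> logger.error("[{}] failed to create destination index", config.getId())));
    }
}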
*/ -package org.elasticsearch.xpack.dataframe.persistence; +package org.elasticsearch.xpack.transform.persistence; import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexTemplateMetaData; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.xpack.core.common.notifications.AbstractAuditMessage; -import org.elasticsearch.xpack.core.dataframe.DataFrameField; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerTransformStats; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformProgress; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformState; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformStoredDoc; -import org.elasticsearch.xpack.core.dataframe.transforms.DestConfig; -import org.elasticsearch.xpack.core.dataframe.transforms.SourceConfig; +import org.elasticsearch.xpack.core.transform.TransformField; +import org.elasticsearch.xpack.core.transform.transforms.DestConfig; +import org.elasticsearch.xpack.core.transform.transforms.SourceConfig; +import org.elasticsearch.xpack.core.transform.transforms.TransformIndexerStats; +import org.elasticsearch.xpack.core.transform.transforms.TransformProgress; +import org.elasticsearch.xpack.core.transform.transforms.TransformState; +import org.elasticsearch.xpack.core.transform.transforms.TransformStoredDoc; import java.io.IOException; import java.util.Collections; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.index.mapper.MapperService.SINGLE_MAPPING_NAME; -import static org.elasticsearch.xpack.core.dataframe.DataFrameField.TRANSFORM_ID; +import static org.elasticsearch.xpack.core.ClientHelper.TRANSFORM_ORIGIN; +import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; +import static org.elasticsearch.xpack.core.transform.TransformField.TRANSFORM_ID; -public final class DataFrameInternalIndex { +public final class TransformInternalIndex { /* Changelog of internal index versions * @@ -70,7 +82,7 @@ public final class DataFrameInternalIndex { public static final String KEYWORD = "keyword"; public static IndexTemplateMetaData getIndexTemplateMetaData() throws IOException { - IndexTemplateMetaData dataFrameTemplate = IndexTemplateMetaData.builder(LATEST_INDEX_VERSIONED_NAME) + IndexTemplateMetaData transformTemplate = IndexTemplateMetaData.builder(LATEST_INDEX_VERSIONED_NAME) .patterns(Collections.singletonList(LATEST_INDEX_VERSIONED_NAME)) .version(Version.CURRENT.id) .settings(Settings.builder() @@ -80,11 +92,11 @@ public final class DataFrameInternalIndex { // todo: remove type .putMapping(MapperService.SINGLE_MAPPING_NAME, Strings.toString(mappings())) .build(); 
- return dataFrameTemplate; + return transformTemplate; } public static IndexTemplateMetaData getAuditIndexTemplateMetaData() throws IOException { - IndexTemplateMetaData dataFrameTemplate = IndexTemplateMetaData.builder(AUDIT_INDEX) + IndexTemplateMetaData transformTemplate = IndexTemplateMetaData.builder(AUDIT_INDEX) .patterns(Collections.singletonList(AUDIT_INDEX_PREFIX + "*")) .version(Version.CURRENT.id) .settings(Settings.builder() @@ -93,7 +105,7 @@ public final class DataFrameInternalIndex { .put(IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS, "0-1")) .putMapping(MapperService.SINGLE_MAPPING_NAME, Strings.toString(auditMappings())) .build(); - return dataFrameTemplate; + return transformTemplate; } private static XContentBuilder auditMappings() throws IOException { @@ -131,6 +143,10 @@ public final class DataFrameInternalIndex { public static XContentBuilder mappings() throws IOException { XContentBuilder builder = jsonBuilder(); + return mappings(builder); + } + + public static XContentBuilder mappings(XContentBuilder builder) throws IOException { builder.startObject(); builder.startObject(MapperService.SINGLE_MAPPING_NAME); @@ -141,13 +157,13 @@ public final class DataFrameInternalIndex { // the schema definitions builder.startObject(PROPERTIES); // overall doc type - builder.startObject(DataFrameField.INDEX_DOC_TYPE.getPreferredName()).field(TYPE, KEYWORD).endObject(); + builder.startObject(TransformField.INDEX_DOC_TYPE.getPreferredName()).field(TYPE, KEYWORD).endObject(); // add the schema for transform configurations - addDataFrameTransformsConfigMappings(builder); + addTransformsConfigMappings(builder); // add the schema for transform stats - addDataFrameTransformStoredDocMappings(builder); + addTransformStoredDocMappings(builder); // add the schema for checkpoints - addDataFrameCheckpointMappings(builder); + addTransformCheckpointMappings(builder); // end type builder.endObject(); // end properties @@ -158,85 +174,85 @@ public final class DataFrameInternalIndex { } - private static XContentBuilder addDataFrameTransformStoredDocMappings(XContentBuilder builder) throws IOException { + private static XContentBuilder addTransformStoredDocMappings(XContentBuilder builder) throws IOException { return builder - .startObject(DataFrameTransformStoredDoc.STATE_FIELD.getPreferredName()) + .startObject(TransformStoredDoc.STATE_FIELD.getPreferredName()) .startObject(PROPERTIES) - .startObject(DataFrameTransformState.TASK_STATE.getPreferredName()) + .startObject(TransformState.TASK_STATE.getPreferredName()) .field(TYPE, KEYWORD) .endObject() - .startObject(DataFrameTransformState.INDEXER_STATE.getPreferredName()) + .startObject(TransformState.INDEXER_STATE.getPreferredName()) .field(TYPE, KEYWORD) .endObject() - .startObject(DataFrameTransformState.CURRENT_POSITION.getPreferredName()) + .startObject(TransformState.CURRENT_POSITION.getPreferredName()) .field(ENABLED, false) .endObject() - .startObject(DataFrameTransformState.CHECKPOINT.getPreferredName()) + .startObject(TransformState.CHECKPOINT.getPreferredName()) .field(TYPE, LONG) .endObject() - .startObject(DataFrameTransformState.REASON.getPreferredName()) + .startObject(TransformState.REASON.getPreferredName()) .field(TYPE, KEYWORD) .endObject() - .startObject(DataFrameTransformState.PROGRESS.getPreferredName()) + .startObject(TransformState.PROGRESS.getPreferredName()) .startObject(PROPERTIES) - .startObject(DataFrameTransformProgress.TOTAL_DOCS.getPreferredName()) + .startObject(TransformProgress.TOTAL_DOCS.getPreferredName()) 
.field(TYPE, LONG) .endObject() - .startObject(DataFrameTransformProgress.DOCS_REMAINING.getPreferredName()) + .startObject(TransformProgress.DOCS_REMAINING.getPreferredName()) .field(TYPE, LONG) .endObject() - .startObject(DataFrameTransformProgress.PERCENT_COMPLETE) + .startObject(TransformProgress.PERCENT_COMPLETE) .field(TYPE, FLOAT) .endObject() - .startObject(DataFrameTransformProgress.DOCS_INDEXED.getPreferredName()) + .startObject(TransformProgress.DOCS_INDEXED.getPreferredName()) .field(TYPE, LONG) .endObject() - .startObject(DataFrameTransformProgress.DOCS_PROCESSED.getPreferredName()) + .startObject(TransformProgress.DOCS_PROCESSED.getPreferredName()) .field(TYPE, LONG) .endObject() .endObject() .endObject() .endObject() .endObject() - .startObject(DataFrameField.STATS_FIELD.getPreferredName()) + .startObject(TransformField.STATS_FIELD.getPreferredName()) .startObject(PROPERTIES) - .startObject(DataFrameIndexerTransformStats.NUM_PAGES.getPreferredName()) + .startObject(TransformIndexerStats.NUM_PAGES.getPreferredName()) .field(TYPE, LONG) .endObject() - .startObject(DataFrameIndexerTransformStats.NUM_INPUT_DOCUMENTS.getPreferredName()) + .startObject(TransformIndexerStats.NUM_INPUT_DOCUMENTS.getPreferredName()) .field(TYPE, LONG) .endObject() - .startObject(DataFrameIndexerTransformStats.NUM_OUTPUT_DOCUMENTS.getPreferredName()) + .startObject(TransformIndexerStats.NUM_OUTPUT_DOCUMENTS.getPreferredName()) .field(TYPE, LONG) .endObject() - .startObject(DataFrameIndexerTransformStats.NUM_INVOCATIONS.getPreferredName()) + .startObject(TransformIndexerStats.NUM_INVOCATIONS.getPreferredName()) .field(TYPE, LONG) .endObject() - .startObject(DataFrameIndexerTransformStats.INDEX_TIME_IN_MS.getPreferredName()) + .startObject(TransformIndexerStats.INDEX_TIME_IN_MS.getPreferredName()) .field(TYPE, LONG) .endObject() - .startObject(DataFrameIndexerTransformStats.SEARCH_TIME_IN_MS.getPreferredName()) + .startObject(TransformIndexerStats.SEARCH_TIME_IN_MS.getPreferredName()) .field(TYPE, LONG) .endObject() - .startObject(DataFrameIndexerTransformStats.INDEX_TOTAL.getPreferredName()) + .startObject(TransformIndexerStats.INDEX_TOTAL.getPreferredName()) .field(TYPE, LONG) .endObject() - .startObject(DataFrameIndexerTransformStats.SEARCH_TOTAL.getPreferredName()) + .startObject(TransformIndexerStats.SEARCH_TOTAL.getPreferredName()) .field(TYPE, LONG) .endObject() - .startObject(DataFrameIndexerTransformStats.SEARCH_FAILURES.getPreferredName()) + .startObject(TransformIndexerStats.SEARCH_FAILURES.getPreferredName()) .field(TYPE, LONG) .endObject() - .startObject(DataFrameIndexerTransformStats.INDEX_FAILURES.getPreferredName()) + .startObject(TransformIndexerStats.INDEX_FAILURES.getPreferredName()) .field(TYPE, LONG) .endObject() - .startObject(DataFrameIndexerTransformStats.EXPONENTIAL_AVG_CHECKPOINT_DURATION_MS.getPreferredName()) + .startObject(TransformIndexerStats.EXPONENTIAL_AVG_CHECKPOINT_DURATION_MS.getPreferredName()) .field(TYPE, DOUBLE) .endObject() - .startObject(DataFrameIndexerTransformStats.EXPONENTIAL_AVG_DOCUMENTS_INDEXED.getPreferredName()) + .startObject(TransformIndexerStats.EXPONENTIAL_AVG_DOCUMENTS_INDEXED.getPreferredName()) .field(TYPE, DOUBLE) .endObject() - .startObject(DataFrameIndexerTransformStats.EXPONENTIAL_AVG_DOCUMENTS_PROCESSED.getPreferredName()) + .startObject(TransformIndexerStats.EXPONENTIAL_AVG_DOCUMENTS_PROCESSED.getPreferredName()) .field(TYPE, DOUBLE) .endObject() .endObject() @@ -246,12 +262,12 @@ public final class DataFrameInternalIndex { // 
.startObject("checkpointing").field(ENABLED, false).endObject(); } - public static XContentBuilder addDataFrameTransformsConfigMappings(XContentBuilder builder) throws IOException { + public static XContentBuilder addTransformsConfigMappings(XContentBuilder builder) throws IOException { return builder - .startObject(DataFrameField.ID.getPreferredName()) + .startObject(TransformField.ID.getPreferredName()) .field(TYPE, KEYWORD) .endObject() - .startObject(DataFrameField.SOURCE.getPreferredName()) + .startObject(TransformField.SOURCE.getPreferredName()) .startObject(PROPERTIES) .startObject(SourceConfig.INDEX.getPreferredName()) .field(TYPE, KEYWORD) @@ -261,30 +277,30 @@ public final class DataFrameInternalIndex { .endObject() .endObject() .endObject() - .startObject(DataFrameField.DESTINATION.getPreferredName()) + .startObject(TransformField.DESTINATION.getPreferredName()) .startObject(PROPERTIES) .startObject(DestConfig.INDEX.getPreferredName()) .field(TYPE, KEYWORD) .endObject() .endObject() .endObject() - .startObject(DataFrameField.DESCRIPTION.getPreferredName()) + .startObject(TransformField.DESCRIPTION.getPreferredName()) .field(TYPE, TEXT) .endObject() - .startObject(DataFrameField.VERSION.getPreferredName()) + .startObject(TransformField.VERSION.getPreferredName()) .field(TYPE, KEYWORD) .endObject() - .startObject(DataFrameField.CREATE_TIME.getPreferredName()) + .startObject(TransformField.CREATE_TIME.getPreferredName()) .field(TYPE, DATE) .endObject(); } - private static XContentBuilder addDataFrameCheckpointMappings(XContentBuilder builder) throws IOException { + private static XContentBuilder addTransformCheckpointMappings(XContentBuilder builder) throws IOException { return builder - .startObject(DataFrameField.TIMESTAMP_MILLIS.getPreferredName()) + .startObject(TransformField.TIMESTAMP_MILLIS.getPreferredName()) .field(TYPE, DATE) .endObject() - .startObject(DataFrameField.TIME_UPPER_BOUND_MILLIS.getPreferredName()) + .startObject(TransformField.TIME_UPPER_BOUND_MILLIS.getPreferredName()) .field(TYPE, DATE) .endObject(); } @@ -302,6 +318,46 @@ public final class DataFrameInternalIndex { .endObject(); } - private DataFrameInternalIndex() { + public static boolean haveLatestVersionedIndexTemplate(ClusterState state) { + return state.getMetaData().getTemplates().containsKey(LATEST_INDEX_VERSIONED_NAME); + } + + /** + * This method should be called before any document is indexed that relies on the + * existence of the latest index template to create the internal index. The + * reason is that the standard template upgrader only runs when the master node + * is upgraded to the newer version. If data nodes are upgraded before master + * nodes and transforms get assigned to those data nodes then without this check + * the data nodes will index documents into the internal index before the necessary + * index template is present and this will result in an index with completely + * dynamic mappings being created (which is very bad). 
+ */ + public static void installLatestVersionedIndexTemplateIfRequired(ClusterService clusterService, Client client, + ActionListener listener) { + + // The check for existence of the template is against local cluster state, so very cheap + if (haveLatestVersionedIndexTemplate(clusterService.state())) { + listener.onResponse(null); + return; + } + + // Installing the template involves communication with the master node, so it's more expensive but much rarer + try { + IndexTemplateMetaData indexTemplateMetaData = getIndexTemplateMetaData(); + BytesReference jsonMappings = new BytesArray(indexTemplateMetaData.mappings().get(SINGLE_MAPPING_NAME).uncompressed()); + PutIndexTemplateRequest request = new PutIndexTemplateRequest(LATEST_INDEX_VERSIONED_NAME) + .patterns(indexTemplateMetaData.patterns()) + .version(indexTemplateMetaData.version()) + .settings(indexTemplateMetaData.settings()) + .mapping(SINGLE_MAPPING_NAME, XContentHelper.convertToMap(jsonMappings, true, XContentType.JSON).v2()); + ActionListener innerListener = ActionListener.wrap(r -> listener.onResponse(null), listener::onFailure); + executeAsyncWithOrigin(client.threadPool().getThreadContext(), TRANSFORM_ORIGIN, request, + innerListener, client.admin().indices()::putTemplate); + } catch (IOException e) { + listener.onFailure(e); + } + } + + private TransformInternalIndex() { } } diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/RestDeleteTransformAction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/RestDeleteTransformAction.java new file mode 100644 index 00000000000..a240801420a --- /dev/null +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/RestDeleteTransformAction.java @@ -0,0 +1,41 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
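Editorial aside: the javadoc above describes an ordering constraint, i.e. the versioned index template must exist before any data node writes to the internal index. A minimal sketch of a write path honouring that constraint is below; the wrapper class, the Runnable, and the ActionListener<Void> type are assumptions (the Void type is inferred from the onResponse(null) calls in the hunk), not code from this PR.

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.xpack.transform.persistence.TransformInternalIndex;

// Sketch: any write into the transform internal index first ensures the latest versioned
// template is installed, as explained in the javadoc above. The doWrite Runnable stands in
// for the real indexing call (e.g. putOrUpdateTransformStoredDoc).
class TemplateGuardSketch {
    static void writeAfterTemplateCheck(ClusterService clusterService, Client client, Runnable doWrite,
                                        ActionListener<Void> listener) {
        TransformInternalIndex.installLatestVersionedIndexTemplateIfRequired(clusterService, client,
            ActionListener.wrap(unused -> {
                doWrite.run();               // safe: the template now exists, so no dynamic-mapping index is created
                listener.onResponse(null);
            }, listener::onFailure));
    }
}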
+ */ +package org.elasticsearch.xpack.transform.rest.action; + + +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.core.transform.TransformField; +import org.elasticsearch.xpack.core.transform.action.DeleteTransformAction; + +public class RestDeleteTransformAction extends BaseRestHandler { + + public RestDeleteTransformAction(RestController controller) { + controller.registerHandler(RestRequest.Method.DELETE, TransformField.REST_BASE_PATH_TRANSFORMS_BY_ID, this); + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { + if (restRequest.hasContent()) { + throw new IllegalArgumentException("delete transform requests can not have a request body"); + } + + String id = restRequest.param(TransformField.ID.getPreferredName()); + boolean force = restRequest.paramAsBoolean(TransformField.FORCE.getPreferredName(), false); + DeleteTransformAction.Request request = new DeleteTransformAction.Request(id, force); + + return channel -> client.execute(DeleteTransformAction.INSTANCE, request, + new RestToXContentListener<>(channel)); + } + + @Override + public String getName() { + return "data_frame_delete_transform_action"; + } +} diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestGetDataFrameTransformsAction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/RestGetTransformAction.java similarity index 59% rename from x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestGetDataFrameTransformsAction.java rename to x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/RestGetTransformAction.java index 7dce1c7789a..d5ddef41d0c 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestGetDataFrameTransformsAction.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/RestGetTransformAction.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.dataframe.rest.action; +package org.elasticsearch.xpack.transform.rest.action; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.rest.BaseRestHandler; @@ -12,23 +12,23 @@ import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestToXContentListener; import org.elasticsearch.xpack.core.action.util.PageParams; -import org.elasticsearch.xpack.core.dataframe.DataFrameField; -import org.elasticsearch.xpack.core.dataframe.action.GetDataFrameTransformsAction; +import org.elasticsearch.xpack.core.transform.TransformField; +import org.elasticsearch.xpack.core.transform.action.GetTransformsAction; -import static org.elasticsearch.xpack.core.dataframe.DataFrameField.ALLOW_NO_MATCH; +import static org.elasticsearch.xpack.core.transform.TransformField.ALLOW_NO_MATCH; -public class RestGetDataFrameTransformsAction extends BaseRestHandler { +public class RestGetTransformAction extends BaseRestHandler { - public RestGetDataFrameTransformsAction(RestController controller) { - controller.registerHandler(RestRequest.Method.GET, DataFrameField.REST_BASE_PATH_TRANSFORMS, this); - controller.registerHandler(RestRequest.Method.GET, DataFrameField.REST_BASE_PATH_TRANSFORMS_BY_ID, this); + public RestGetTransformAction(RestController controller) { + controller.registerHandler(RestRequest.Method.GET, TransformField.REST_BASE_PATH_TRANSFORMS, this); + controller.registerHandler(RestRequest.Method.GET, TransformField.REST_BASE_PATH_TRANSFORMS_BY_ID, this); } @Override protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { - GetDataFrameTransformsAction.Request request = new GetDataFrameTransformsAction.Request(); + GetTransformsAction.Request request = new GetTransformsAction.Request(); - String id = restRequest.param(DataFrameField.ID.getPreferredName()); + String id = restRequest.param(TransformField.ID.getPreferredName()); request.setResourceId(id); request.setAllowNoResources(restRequest.paramAsBoolean(ALLOW_NO_MATCH.getPreferredName(), true)); if (restRequest.hasParam(PageParams.FROM.getPreferredName()) || restRequest.hasParam(PageParams.SIZE.getPreferredName())) { @@ -36,7 +36,7 @@ public class RestGetDataFrameTransformsAction extends BaseRestHandler { new PageParams(restRequest.paramAsInt(PageParams.FROM.getPreferredName(), PageParams.DEFAULT_FROM), restRequest.paramAsInt(PageParams.SIZE.getPreferredName(), PageParams.DEFAULT_SIZE))); } - return channel -> client.execute(GetDataFrameTransformsAction.INSTANCE, request, new RestToXContentListener<>(channel)); + return channel -> client.execute(GetTransformsAction.INSTANCE, request, new RestToXContentListener<>(channel)); } @Override diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestGetDataFrameTransformsStatsAction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/RestGetTransformStatsAction.java similarity index 59% rename from x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestGetDataFrameTransformsStatsAction.java rename to x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/RestGetTransformStatsAction.java index 7e96f34fa3f..7fc8d2ba656 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestGetDataFrameTransformsStatsAction.java +++ 
b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/RestGetTransformStatsAction.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.dataframe.rest.action; +package org.elasticsearch.xpack.transform.rest.action; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.rest.BaseRestHandler; @@ -12,29 +12,29 @@ import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestToXContentListener; import org.elasticsearch.xpack.core.action.util.PageParams; -import org.elasticsearch.xpack.core.dataframe.DataFrameField; -import org.elasticsearch.xpack.core.dataframe.action.GetDataFrameTransformsStatsAction; +import org.elasticsearch.xpack.core.transform.TransformField; +import org.elasticsearch.xpack.core.transform.action.GetTransformsStatsAction; -import static org.elasticsearch.xpack.core.dataframe.DataFrameField.ALLOW_NO_MATCH; +import static org.elasticsearch.xpack.core.transform.TransformField.ALLOW_NO_MATCH; -public class RestGetDataFrameTransformsStatsAction extends BaseRestHandler { +public class RestGetTransformStatsAction extends BaseRestHandler { - public RestGetDataFrameTransformsStatsAction(RestController controller) { - controller.registerHandler(RestRequest.Method.GET, DataFrameField.REST_BASE_PATH_TRANSFORMS + "_stats", this); - controller.registerHandler(RestRequest.Method.GET, DataFrameField.REST_BASE_PATH_TRANSFORMS_BY_ID + "_stats", this); + public RestGetTransformStatsAction(RestController controller) { + controller.registerHandler(RestRequest.Method.GET, TransformField.REST_BASE_PATH_TRANSFORMS + "_stats", this); + controller.registerHandler(RestRequest.Method.GET, TransformField.REST_BASE_PATH_TRANSFORMS_BY_ID + "_stats", this); } @Override protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { - String id = restRequest.param(DataFrameField.ID.getPreferredName()); - GetDataFrameTransformsStatsAction.Request request = new GetDataFrameTransformsStatsAction.Request(id); + String id = restRequest.param(TransformField.ID.getPreferredName()); + GetTransformsStatsAction.Request request = new GetTransformsStatsAction.Request(id); request.setAllowNoMatch(restRequest.paramAsBoolean(ALLOW_NO_MATCH.getPreferredName(), true)); if (restRequest.hasParam(PageParams.FROM.getPreferredName()) || restRequest.hasParam(PageParams.SIZE.getPreferredName())) { request.setPageParams( new PageParams(restRequest.paramAsInt(PageParams.FROM.getPreferredName(), PageParams.DEFAULT_FROM), restRequest.paramAsInt(PageParams.SIZE.getPreferredName(), PageParams.DEFAULT_SIZE))); } - return channel -> client.execute(GetDataFrameTransformsStatsAction.INSTANCE, request, + return channel -> client.execute(GetTransformsStatsAction.INSTANCE, request, new RestToXContentListener<>(channel)); } diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestPreviewDataFrameTransformAction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/RestPreviewTransformAction.java similarity index 54% rename from x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestPreviewDataFrameTransformAction.java rename to x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/RestPreviewTransformAction.java index 6f8df5cad47..3bba1574a45 100644 --- 
a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestPreviewDataFrameTransformAction.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/RestPreviewTransformAction.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.dataframe.rest.action; +package org.elasticsearch.xpack.transform.rest.action; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.xcontent.XContentParser; @@ -12,15 +12,15 @@ import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestToXContentListener; -import org.elasticsearch.xpack.core.dataframe.DataFrameField; -import org.elasticsearch.xpack.core.dataframe.action.PreviewDataFrameTransformAction; +import org.elasticsearch.xpack.core.transform.TransformField; +import org.elasticsearch.xpack.core.transform.action.PreviewTransformAction; import java.io.IOException; -public class RestPreviewDataFrameTransformAction extends BaseRestHandler { +public class RestPreviewTransformAction extends BaseRestHandler { - public RestPreviewDataFrameTransformAction(RestController controller) { - controller.registerHandler(RestRequest.Method.POST, DataFrameField.REST_BASE_PATH + "transforms/_preview", this); + public RestPreviewTransformAction(RestController controller) { + controller.registerHandler(RestRequest.Method.POST, TransformField.REST_BASE_PATH + "transforms/_preview", this); } @Override @@ -32,7 +32,7 @@ public class RestPreviewDataFrameTransformAction extends BaseRestHandler { protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { XContentParser parser = restRequest.contentParser(); - PreviewDataFrameTransformAction.Request request = PreviewDataFrameTransformAction.Request.fromXContent(parser); - return channel -> client.execute(PreviewDataFrameTransformAction.INSTANCE, request, new RestToXContentListener<>(channel)); + PreviewTransformAction.Request request = PreviewTransformAction.Request.fromXContent(parser); + return channel -> client.execute(PreviewTransformAction.INSTANCE, request, new RestToXContentListener<>(channel)); } } diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/RestPutTransformAction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/RestPutTransformAction.java new file mode 100644 index 00000000000..3b87c13f5d2 --- /dev/null +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/RestPutTransformAction.java @@ -0,0 +1,41 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.transform.rest.action; + +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.core.transform.TransformField; +import org.elasticsearch.xpack.core.transform.action.PutTransformAction; + +import java.io.IOException; + +public class RestPutTransformAction extends BaseRestHandler { + + public RestPutTransformAction(RestController controller) { + controller.registerHandler(RestRequest.Method.PUT, TransformField.REST_BASE_PATH_TRANSFORMS_BY_ID, this); + } + + @Override + public String getName() { + return "data_frame_put_transform_action"; + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { + String id = restRequest.param(TransformField.ID.getPreferredName()); + XContentParser parser = restRequest.contentParser(); + + boolean deferValidation = restRequest.paramAsBoolean(TransformField.DEFER_VALIDATION.getPreferredName(), false); + PutTransformAction.Request request = PutTransformAction.Request.fromXContent(parser, id, deferValidation); + + return channel -> client.execute(PutTransformAction.INSTANCE, request, new RestToXContentListener<>(channel)); + } +} diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestStartDataFrameTransformAction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/RestStartTransformAction.java similarity index 50% rename from x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestStartDataFrameTransformAction.java rename to x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/RestStartTransformAction.java index 44c2c66fbb7..d016f58bb79 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestStartDataFrameTransformAction.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/RestStartTransformAction.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.dataframe.rest.action; +package org.elasticsearch.xpack.transform.rest.action; import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.client.node.NodeClient; @@ -12,22 +12,21 @@ import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestToXContentListener; -import org.elasticsearch.xpack.core.dataframe.DataFrameField; -import org.elasticsearch.xpack.core.dataframe.action.StartDataFrameTransformAction; +import org.elasticsearch.xpack.core.transform.TransformField; +import org.elasticsearch.xpack.core.transform.action.StartTransformAction; -public class RestStartDataFrameTransformAction extends BaseRestHandler { +public class RestStartTransformAction extends BaseRestHandler { - public RestStartDataFrameTransformAction(RestController controller) { - controller.registerHandler(RestRequest.Method.POST, DataFrameField.REST_BASE_PATH_TRANSFORMS_BY_ID + "_start", this); + public RestStartTransformAction(RestController controller) { + controller.registerHandler(RestRequest.Method.POST, TransformField.REST_BASE_PATH_TRANSFORMS_BY_ID + "_start", this); } @Override protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { - String id = restRequest.param(DataFrameField.ID.getPreferredName()); - boolean force = restRequest.paramAsBoolean(DataFrameField.FORCE.getPreferredName(), false); - StartDataFrameTransformAction.Request request = new StartDataFrameTransformAction.Request(id, force); - request.timeout(restRequest.paramAsTime(DataFrameField.TIMEOUT.getPreferredName(), AcknowledgedRequest.DEFAULT_ACK_TIMEOUT)); - return channel -> client.execute(StartDataFrameTransformAction.INSTANCE, request, + String id = restRequest.param(TransformField.ID.getPreferredName()); + StartTransformAction.Request request = new StartTransformAction.Request(id); + request.timeout(restRequest.paramAsTime(TransformField.TIMEOUT.getPreferredName(), AcknowledgedRequest.DEFAULT_ACK_TIMEOUT)); + return channel -> client.execute(StartTransformAction.INSTANCE, request, new RestToXContentListener<>(channel)); } diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/RestStopTransformAction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/RestStopTransformAction.java new file mode 100644 index 00000000000..49ba802e5b6 --- /dev/null +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/RestStopTransformAction.java @@ -0,0 +1,47 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.transform.rest.action; + +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.core.transform.TransformField; +import org.elasticsearch.xpack.core.transform.action.StopTransformAction; + +public class RestStopTransformAction extends BaseRestHandler { + + public RestStopTransformAction(RestController controller) { + controller.registerHandler(RestRequest.Method.POST, TransformField.REST_BASE_PATH_TRANSFORMS_BY_ID + "_stop", this); + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { + String id = restRequest.param(TransformField.ID.getPreferredName()); + TimeValue timeout = restRequest.paramAsTime(TransformField.TIMEOUT.getPreferredName(), + StopTransformAction.DEFAULT_TIMEOUT); + boolean waitForCompletion = restRequest.paramAsBoolean(TransformField.WAIT_FOR_COMPLETION.getPreferredName(), false); + boolean force = restRequest.paramAsBoolean(TransformField.FORCE.getPreferredName(), false); + boolean allowNoMatch = restRequest.paramAsBoolean(TransformField.ALLOW_NO_MATCH.getPreferredName(), false); + + + StopTransformAction.Request request = new StopTransformAction.Request(id, + waitForCompletion, + force, + timeout, + allowNoMatch); + + return channel -> client.execute(StopTransformAction.INSTANCE, request, + new RestToXContentListener<>(channel)); + } + + @Override + public String getName() { + return "data_frame_stop_transform_action"; + } +} diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/RestUpdateTransformAction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/RestUpdateTransformAction.java new file mode 100644 index 00000000000..1e67b8b912e --- /dev/null +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/rest/action/RestUpdateTransformAction.java @@ -0,0 +1,40 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
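Editorial aside: RestStopTransformAction above registers POST {base path}/{id}/_stop and reads four query parameters. A hedged sketch of invoking it with the low-level REST client follows; the literal _data_frame/transforms base path is an assumption (the TransformField path constants are not shown in this hunk), while the parameter names and defaults mirror prepareRequest() above.

import java.io.IOException;

import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

// Illustrative client-side call for the handler registered above; the endpoint path is assumed.
class StopTransformCallSketch {
    static Response stopTransform(RestClient lowLevelClient, String transformId) throws IOException {
        Request request = new Request("POST", "/_data_frame/transforms/" + transformId + "/_stop");
        request.addParameter("wait_for_completion", "true"); // handler default: false
        request.addParameter("force", "false");              // handler default: false
        request.addParameter("timeout", "30s");              // handler default: StopTransformAction.DEFAULT_TIMEOUT
        request.addParameter("allow_no_match", "true");      // handler default: false
        return lowLevelClient.performRequest(request);
    }
}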
+ */ + +package org.elasticsearch.xpack.transform.rest.action; + +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.core.transform.TransformField; +import org.elasticsearch.xpack.core.transform.action.UpdateTransformAction; + +import java.io.IOException; + +public class RestUpdateTransformAction extends BaseRestHandler { + + public RestUpdateTransformAction(RestController controller) { + controller.registerHandler(RestRequest.Method.POST, TransformField.REST_BASE_PATH_TRANSFORMS_BY_ID + "_update", this); + } + + @Override + public String getName() { + return "data_frame_update_transform_action"; + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { + String id = restRequest.param(TransformField.ID.getPreferredName()); + boolean deferValidation = restRequest.paramAsBoolean(TransformField.DEFER_VALIDATION.getPreferredName(), false); + XContentParser parser = restRequest.contentParser(); + UpdateTransformAction.Request request = UpdateTransformAction.Request.fromXContent(parser, id, deferValidation); + + return channel -> client.execute(UpdateTransformAction.INSTANCE, request, new RestToXContentListener<>(channel)); + } +} diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/ClientDataFrameIndexer.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexer.java similarity index 83% rename from x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/ClientDataFrameIndexer.java rename to x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexer.java index a64de70dc5f..76f654678ba 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/ClientDataFrameIndexer.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexer.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.dataframe.transforms; +package org.elasticsearch.xpack.transform.transforms; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -24,22 +24,22 @@ import org.elasticsearch.common.logging.LoggerMessageFormat; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.ClientHelper; -import org.elasticsearch.xpack.core.dataframe.DataFrameMessages; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerPosition; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerTransformStats; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpoint; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformProgress; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformState; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformStoredDoc; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformTaskState; -import org.elasticsearch.xpack.core.dataframe.utils.ExceptionsHelper; import org.elasticsearch.xpack.core.indexing.IndexerState; -import org.elasticsearch.xpack.dataframe.checkpoint.CheckpointProvider; -import org.elasticsearch.xpack.dataframe.notifications.DataFrameAuditor; -import org.elasticsearch.xpack.dataframe.persistence.DataFrameTransformsConfigManager; -import org.elasticsearch.xpack.dataframe.persistence.SeqNoPrimaryTermAndIndex; -import org.elasticsearch.xpack.dataframe.transforms.pivot.AggregationResultUtils; +import org.elasticsearch.xpack.core.transform.TransformMessages; +import org.elasticsearch.xpack.core.transform.transforms.TransformIndexerPosition; +import org.elasticsearch.xpack.core.transform.transforms.TransformIndexerStats; +import org.elasticsearch.xpack.core.transform.transforms.TransformCheckpoint; +import org.elasticsearch.xpack.core.transform.transforms.TransformConfig; +import org.elasticsearch.xpack.core.transform.transforms.TransformProgress; +import org.elasticsearch.xpack.core.transform.transforms.TransformState; +import org.elasticsearch.xpack.core.transform.transforms.TransformStoredDoc; +import org.elasticsearch.xpack.core.transform.transforms.TransformTaskState; +import org.elasticsearch.xpack.core.transform.utils.ExceptionsHelper; +import org.elasticsearch.xpack.transform.checkpoint.CheckpointProvider; +import org.elasticsearch.xpack.transform.notifications.TransformAuditor; +import org.elasticsearch.xpack.transform.persistence.TransformConfigManager; +import org.elasticsearch.xpack.transform.persistence.SeqNoPrimaryTermAndIndex; +import org.elasticsearch.xpack.transform.transforms.pivot.AggregationResultUtils; import java.time.Instant; import java.util.Map; @@ -47,16 +47,16 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; -class ClientDataFrameIndexer extends DataFrameIndexer { +class ClientTransformIndexer extends TransformIndexer { - private static final Logger logger = LogManager.getLogger(ClientDataFrameIndexer.class); + private static final Logger logger = LogManager.getLogger(ClientTransformIndexer.class); private long logEvery = 1; private long logCount = 0; private final Client client; - private final DataFrameTransformsConfigManager transformsConfigManager; + private final 
TransformConfigManager transformsConfigManager; private final CheckpointProvider checkpointProvider; - private final DataFrameTransformTask transformTask; + private final TransformTask transformTask; private final AtomicInteger failureCount; private volatile boolean auditBulkFailures = true; // Indicates that the source has changed for the current run @@ -66,19 +66,19 @@ class ClientDataFrameIndexer extends DataFrameIndexer { private final AtomicBoolean oldStatsCleanedUp = new AtomicBoolean(false); private volatile Instant changesLastDetectedAt; - ClientDataFrameIndexer(DataFrameTransformsConfigManager transformsConfigManager, + ClientTransformIndexer(TransformConfigManager transformsConfigManager, CheckpointProvider checkpointProvider, AtomicReference initialState, - DataFrameIndexerPosition initialPosition, + TransformIndexerPosition initialPosition, Client client, - DataFrameAuditor auditor, - DataFrameIndexerTransformStats initialStats, - DataFrameTransformConfig transformConfig, + TransformAuditor auditor, + TransformIndexerStats initialStats, + TransformConfig transformConfig, Map fieldMappings, - DataFrameTransformProgress transformProgress, - DataFrameTransformCheckpoint lastCheckpoint, - DataFrameTransformCheckpoint nextCheckpoint, - DataFrameTransformTask parentTask) { + TransformProgress transformProgress, + TransformCheckpoint lastCheckpoint, + TransformCheckpoint nextCheckpoint, + TransformTask parentTask) { super(ExceptionsHelper.requireNonNull(parentTask, "parentTask") .getThreadPool() .executor(ThreadPool.Names.GENERIC), @@ -87,7 +87,7 @@ class ClientDataFrameIndexer extends DataFrameIndexer { fieldMappings, ExceptionsHelper.requireNonNull(initialState, "initialState"), initialPosition, - initialStats == null ? new DataFrameIndexerTransformStats() : initialStats, + initialStats == null ? new TransformIndexerStats() : initialStats, transformProgress, lastCheckpoint, nextCheckpoint); @@ -101,7 +101,7 @@ class ClientDataFrameIndexer extends DataFrameIndexer { @Override protected void onStart(long now, ActionListener listener) { - if (transformTask.getTaskState() == DataFrameTransformTaskState.FAILED) { + if (transformTask.getTaskState() == TransformTaskState.FAILED) { logger.debug("[{}] attempted to start while failed.", getJobId()); listener.onFailure(new ElasticsearchException("Attempted to start a failed transform [{}].", getJobId())); return; @@ -117,7 +117,7 @@ class ClientDataFrameIndexer extends DataFrameIndexer { // If nextCheckpoint > 1, this means that we are now on the checkpoint AFTER the batch checkpoint // Consequently, the idea of percent complete no longer makes sense. 
if (nextCheckpoint.getCheckpoint() > 1) { - progress = new DataFrameTransformProgress(null, 0L, 0L); + progress = new TransformProgress(null, 0L, 0L); super.onStart(now, listener); return; } @@ -150,12 +150,12 @@ class ClientDataFrameIndexer extends DataFrameIndexer { transformsConfigManager.getTransformConfiguration(getJobId(), ActionListener.wrap( config -> { transformConfig = config; - logger.debug("[{}] successfully refreshed data frame transform config from index.", getJobId()); + logger.debug("[{}] successfully refreshed transform config from index.", getJobId()); updateConfigListener.onResponse(null); }, failure -> { - String msg = DataFrameMessages.getMessage( - DataFrameMessages.FAILED_TO_RELOAD_TRANSFORM_CONFIGURATION, + String msg = TransformMessages.getMessage( + TransformMessages.FAILED_TO_RELOAD_TRANSFORM_CONFIGURATION, getJobId()); logger.error(msg, failure); // If the transform config index or the transform config is gone, something serious occurred @@ -214,7 +214,7 @@ class ClientDataFrameIndexer extends DataFrameIndexer { @Override public synchronized boolean maybeTriggerAsyncJob(long now) { - if (transformTask.getTaskState() == DataFrameTransformTaskState.FAILED) { + if (transformTask.getTaskState() == TransformTaskState.FAILED) { logger.debug("[{}] schedule was triggered for transform but task is failed. Ignoring trigger.", getJobId()); return false; } @@ -231,26 +231,26 @@ class ClientDataFrameIndexer extends DataFrameIndexer { @Override protected void doNextSearch(SearchRequest request, ActionListener nextPhase) { - if (transformTask.getTaskState() == DataFrameTransformTaskState.FAILED) { + if (transformTask.getTaskState() == TransformTaskState.FAILED) { logger.debug("[{}] attempted to search while failed.", getJobId()); nextPhase.onFailure(new ElasticsearchException("Attempted to do a search request for failed transform [{}].", getJobId())); return; } - ClientHelper.executeWithHeadersAsync(transformConfig.getHeaders(), ClientHelper.DATA_FRAME_ORIGIN, client, + ClientHelper.executeWithHeadersAsync(transformConfig.getHeaders(), ClientHelper.TRANSFORM_ORIGIN, client, SearchAction.INSTANCE, request, nextPhase); } @Override protected void doNextBulk(BulkRequest request, ActionListener nextPhase) { - if (transformTask.getTaskState() == DataFrameTransformTaskState.FAILED) { + if (transformTask.getTaskState() == TransformTaskState.FAILED) { logger.debug("[{}] attempted to bulk index while failed.", getJobId()); nextPhase.onFailure(new ElasticsearchException("Attempted to do a bulk index request for failed transform [{}].", getJobId())); return; } ClientHelper.executeWithHeadersAsync(transformConfig.getHeaders(), - ClientHelper.DATA_FRAME_ORIGIN, + ClientHelper.TRANSFORM_ORIGIN, client, BulkAction.INSTANCE, request, @@ -284,8 +284,8 @@ class ClientDataFrameIndexer extends DataFrameIndexer { } @Override - protected void doSaveState(IndexerState indexerState, DataFrameIndexerPosition position, Runnable next) { - if (transformTask.getTaskState() == DataFrameTransformTaskState.FAILED) { + protected void doSaveState(IndexerState indexerState, TransformIndexerPosition position, Runnable next) { + if (transformTask.getTaskState() == TransformTaskState.FAILED) { logger.debug("[{}] attempted to save state and stats while failed.", getJobId()); // If we are failed, we should call next to allow failure handling to occur if necessary. 
next.run(); @@ -298,14 +298,14 @@ class ClientDataFrameIndexer extends DataFrameIndexer { } // This means that the indexer was triggered to discover changes, found none, and exited early. - // If the state is `STOPPED` this means that DataFrameTransformTask#stop was called while we were checking for changes. + // If the state is `STOPPED` this means that TransformTask#stop was called while we were checking for changes. // Allow the stop call path to continue if (hasSourceChanged == false && indexerState.equals(IndexerState.STOPPED) == false) { next.run(); return; } - DataFrameTransformTaskState taskState = transformTask.getTaskState(); + TransformTaskState taskState = transformTask.getTaskState(); if (indexerState.equals(IndexerState.STARTED) && transformTask.getCheckpoint() == 1 @@ -313,8 +313,8 @@ class ClientDataFrameIndexer extends DataFrameIndexer { // set both to stopped so they are persisted as such indexerState = IndexerState.STOPPED; - auditor.info(transformConfig.getId(), "Data frame finished indexing all data, initiating stop"); - logger.info("[{}] data frame transform finished indexing all data, initiating stop.", transformConfig.getId()); + auditor.info(transformConfig.getId(), "Transform finished indexing all data, initiating stop"); + logger.info("[{}] transform finished indexing all data, initiating stop.", transformConfig.getId()); } // If we are `STOPPED` on a `doSaveState` call, that indicates we transitioned to `STOPPED` from `STOPPING` @@ -323,10 +323,10 @@ class ClientDataFrameIndexer extends DataFrameIndexer { if (indexerState.equals(IndexerState.STOPPED)) { // We don't want adjust the stored taskState because as soon as it is `STOPPED` a user could call // .start again. - taskState = DataFrameTransformTaskState.STOPPED; + taskState = TransformTaskState.STOPPED; } - final DataFrameTransformState state = new DataFrameTransformState( + final TransformState state = new TransformState( taskState, indexerState, position, @@ -342,13 +342,13 @@ class ClientDataFrameIndexer extends DataFrameIndexer { // called is controlled by AsyncTwoPhaseIndexer#onBulkResponse which calls doSaveState every so // often when doing bulk indexing calls or at the end of one indexing run. transformsConfigManager.putOrUpdateTransformStoredDoc( - new DataFrameTransformStoredDoc(getJobId(), state, getStats()), + new TransformStoredDoc(getJobId(), state, getStats()), seqNoPrimaryTermAndIndex, ActionListener.wrap( r -> { transformTask.updateSeqNoPrimaryTermAndIndex(seqNoPrimaryTermAndIndex, r); // for auto stop shutdown the task - if (state.getTaskState().equals(DataFrameTransformTaskState.STOPPED)) { + if (state.getTaskState().equals(TransformTaskState.STOPPED)) { transformTask.shutdown(); } // Only do this clean up once, if it succeeded, no reason to do the query again. 
@@ -378,7 +378,7 @@ class ClientDataFrameIndexer extends DataFrameIndexer { auditor.warning(getJobId(), "Failure updating stats of transform: " + statsExc.getMessage()); // for auto stop shutdown the task - if (state.getTaskState().equals(DataFrameTransformTaskState.STOPPED)) { + if (state.getTaskState().equals(TransformTaskState.STOPPED)) { transformTask.shutdown(); } next.run(); @@ -393,7 +393,7 @@ class ClientDataFrameIndexer extends DataFrameIndexer { handleFailure(exc); } catch (Exception e) { logger.error( - new ParameterizedMessage("[{}] data frame transform encountered an unexpected internal exception: ", getJobId()), + new ParameterizedMessage("[{}] transform encountered an unexpected internal exception: ", getJobId()), e); } } @@ -442,10 +442,10 @@ class ClientDataFrameIndexer extends DataFrameIndexer { } if (shouldAuditOnFinish(checkpoint)) { auditor.info(getJobId(), - "Finished indexing for data frame transform checkpoint [" + checkpoint + "]."); + "Finished indexing for transform checkpoint [" + checkpoint + "]."); } logger.debug( - "[{}] finished indexing for data frame transform checkpoint [{}].", getJobId(), checkpoint); + "[{}] finished indexing for transform checkpoint [{}].", getJobId(), checkpoint); auditBulkFailures = true; listener.onResponse(null); } catch (Exception e) { @@ -478,19 +478,19 @@ class ClientDataFrameIndexer extends DataFrameIndexer { @Override protected void onStop() { - auditor.info(transformConfig.getId(), "Data frame transform has stopped."); - logger.info("[{}] data frame transform has stopped.", transformConfig.getId()); + auditor.info(transformConfig.getId(), "Transform has stopped."); + logger.info("[{}] transform has stopped.", transformConfig.getId()); } @Override protected void onAbort() { - auditor.info(transformConfig.getId(), "Received abort request, stopping data frame transform."); - logger.info("[{}] data frame transform received abort request. Stopping indexer.", transformConfig.getId()); + auditor.info(transformConfig.getId(), "Received abort request, stopping transform."); + logger.info("[{}] transform received abort request. Stopping indexer.", transformConfig.getId()); transformTask.shutdown(); } @Override - protected void createCheckpoint(ActionListener listener) { + protected void createCheckpoint(ActionListener listener) { checkpointProvider.createNextCheckpoint(getLastCheckpoint(), ActionListener.wrap( checkpoint -> transformsConfigManager.putTransformCheckpoint(checkpoint, ActionListener.wrap( @@ -524,11 +524,11 @@ class ClientDataFrameIndexer extends DataFrameIndexer { e -> { logger.warn( new ParameterizedMessage( - "[{}] failed to detect changes for data frame transform. Skipping update till next check.", + "[{}] failed to detect changes for transform. Skipping update till next check.", getJobId()), e); auditor.warning(getJobId(), - "Failed to detect changes for data frame transform, skipping update till next check. Exception: " + "Failed to detect changes for transform, skipping update till next check. 
Exception: " + e.getMessage()); hasChangedListener.onResponse(false); })); @@ -541,7 +541,7 @@ class ClientDataFrameIndexer extends DataFrameIndexer { } synchronized void handleFailure(Exception e) { - logger.warn(new ParameterizedMessage("[{}] data frame transform encountered an exception: ", + logger.warn(new ParameterizedMessage("[{}] transform encountered an exception: ", getJobId()), e); if (handleCircuitBreakingException(e)) { @@ -558,7 +558,7 @@ class ClientDataFrameIndexer extends DataFrameIndexer { // times in a row, very quickly. We do not want to spam the audit log with repeated failures, so only record the first one if (e.getMessage().equals(lastAuditedExceptionMessage) == false) { auditor.warning(getJobId(), - "Data frame transform encountered an exception: " + e.getMessage() + + "Transform encountered an exception: " + e.getMessage() + " Will attempt again at next scheduled trigger."); lastAuditedExceptionMessage = e.getMessage(); } diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexerBuilder.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexerBuilder.java new file mode 100644 index 00000000000..f60bada209f --- /dev/null +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexerBuilder.java @@ -0,0 +1,123 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.transform.transforms; + +import org.elasticsearch.client.Client; +import org.elasticsearch.xpack.core.indexing.IndexerState; +import org.elasticsearch.xpack.core.transform.transforms.TransformIndexerPosition; +import org.elasticsearch.xpack.core.transform.transforms.TransformIndexerStats; +import org.elasticsearch.xpack.core.transform.transforms.TransformCheckpoint; +import org.elasticsearch.xpack.core.transform.transforms.TransformConfig; +import org.elasticsearch.xpack.core.transform.transforms.TransformProgress; +import org.elasticsearch.xpack.transform.checkpoint.CheckpointProvider; +import org.elasticsearch.xpack.transform.checkpoint.TransformCheckpointService; +import org.elasticsearch.xpack.transform.notifications.TransformAuditor; +import org.elasticsearch.xpack.transform.persistence.TransformConfigManager; + +import java.util.Map; +import java.util.concurrent.atomic.AtomicReference; + +class ClientTransformIndexerBuilder { + private Client client; + private TransformConfigManager transformsConfigManager; + private TransformCheckpointService transformsCheckpointService; + private TransformAuditor auditor; + private Map fieldMappings; + private TransformConfig transformConfig; + private TransformIndexerStats initialStats; + private IndexerState indexerState = IndexerState.STOPPED; + private TransformIndexerPosition initialPosition; + private TransformProgress progress; + private TransformCheckpoint lastCheckpoint; + private TransformCheckpoint nextCheckpoint; + + ClientTransformIndexerBuilder() { + this.initialStats = new TransformIndexerStats(); + } + + ClientTransformIndexer build(TransformTask parentTask) { + CheckpointProvider checkpointProvider = transformsCheckpointService.getCheckpointProvider(transformConfig); + + return new ClientTransformIndexer(this.transformsConfigManager, + checkpointProvider, + new 
AtomicReference<>(this.indexerState), + this.initialPosition, + this.client, + this.auditor, + this.initialStats, + this.transformConfig, + this.fieldMappings, + this.progress, + this.lastCheckpoint, + this.nextCheckpoint, + parentTask); + } + + ClientTransformIndexerBuilder setClient(Client client) { + this.client = client; + return this; + } + + ClientTransformIndexerBuilder setTransformsConfigManager(TransformConfigManager transformsConfigManager) { + this.transformsConfigManager = transformsConfigManager; + return this; + } + + ClientTransformIndexerBuilder setTransformsCheckpointService(TransformCheckpointService transformsCheckpointService) { + this.transformsCheckpointService = transformsCheckpointService; + return this; + } + + ClientTransformIndexerBuilder setAuditor(TransformAuditor auditor) { + this.auditor = auditor; + return this; + } + + ClientTransformIndexerBuilder setFieldMappings(Map fieldMappings) { + this.fieldMappings = fieldMappings; + return this; + } + + ClientTransformIndexerBuilder setTransformConfig(TransformConfig transformConfig) { + this.transformConfig = transformConfig; + return this; + } + + TransformConfig getTransformConfig() { + return this.transformConfig; + } + + ClientTransformIndexerBuilder setInitialStats(TransformIndexerStats initialStats) { + this.initialStats = initialStats; + return this; + } + + ClientTransformIndexerBuilder setIndexerState(IndexerState indexerState) { + this.indexerState = indexerState; + return this; + } + + ClientTransformIndexerBuilder setInitialPosition(TransformIndexerPosition initialPosition) { + this.initialPosition = initialPosition; + return this; + } + + ClientTransformIndexerBuilder setProgress(TransformProgress progress) { + this.progress = progress; + return this; + } + + ClientTransformIndexerBuilder setLastCheckpoint(TransformCheckpoint lastCheckpoint) { + this.lastCheckpoint = lastCheckpoint; + return this; + } + + ClientTransformIndexerBuilder setNextCheckpoint(TransformCheckpoint nextCheckpoint) { + this.nextCheckpoint = nextCheckpoint; + return this; + } +} \ No newline at end of file diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/IDGenerator.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/IDGenerator.java similarity index 98% rename from x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/IDGenerator.java rename to x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/IDGenerator.java index 6d6f8455851..c0ebe325494 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/IDGenerator.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/IDGenerator.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.dataframe.transforms; +package org.elasticsearch.xpack.transform.transforms; import org.apache.lucene.util.BytesRefBuilder; import org.elasticsearch.common.Numbers; diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/SourceDestValidator.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/SourceDestValidator.java similarity index 78% rename from x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/SourceDestValidator.java rename to x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/SourceDestValidator.java index 3f5ae039a9a..8c89ffd6d5f 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/SourceDestValidator.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/SourceDestValidator.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.dataframe.transforms; +package org.elasticsearch.xpack.transform.transforms; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.support.IndicesOptions; @@ -13,8 +13,8 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.common.Strings; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.xpack.core.dataframe.DataFrameMessages; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; +import org.elasticsearch.xpack.core.transform.TransformMessages; +import org.elasticsearch.xpack.core.transform.transforms.TransformConfig; import java.util.Arrays; import java.util.HashSet; @@ -22,14 +22,14 @@ import java.util.List; import java.util.Set; /** - * This class contains more complex validations in regards to how {@link DataFrameTransformConfig#getSource()} and - * {@link DataFrameTransformConfig#getDestination()} relate to each other. + * This class contains more complex validations in regards to how {@link TransformConfig#getSource()} and + * {@link TransformConfig#getDestination()} relate to each other. */ public final class SourceDestValidator { interface SourceDestValidation { boolean isDeferrable(); - void validate(DataFrameTransformConfig config, ClusterState clusterState, IndexNameExpressionResolver indexNameExpressionResolver); + void validate(TransformConfig config, ClusterState clusterState, IndexNameExpressionResolver indexNameExpressionResolver); } private static final List VALIDATIONS = Arrays.asList(new SourceMissingValidation(), @@ -39,8 +39,8 @@ public final class SourceDestValidator { /** * Validates the DataFrameTransformConfiguration source and destination indices. * - * A simple name validation is done on {@link DataFrameTransformConfig#getDestination()} inside - * {@link org.elasticsearch.xpack.core.dataframe.action.PutDataFrameTransformAction} + * A simple name validation is done on {@link TransformConfig#getDestination()} inside + * {@link org.elasticsearch.xpack.core.transform.action.PutTransformAction} * * So, no need to do the name checks here. 
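As a rough illustration of the destination-in-source check this validator performs, the sketch below uses plain strings in place of the real ClusterState and IndexNameExpressionResolver plumbing; the error text is illustrative, not the exact TransformMessages wording:

```java
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class SourceDestCheckSketch {

    /**
     * Throws if the destination index is one of the already-resolved source indices,
     * mirroring the REST_PUT_TRANSFORM_DEST_IN_SOURCE validation in the hunk above.
     */
    static void checkDestNotInSource(List<String> resolvedSourceIndices, String destIndex) {
        Set<String> sources = new HashSet<>(resolvedSourceIndices);
        if (sources.contains(destIndex)) {
            throw new IllegalArgumentException(
                "Destination index [" + destIndex + "] is included in the source index expression ["
                    + String.join(",", resolvedSourceIndices) + "]");
        }
    }

    public static void main(String[] args) {
        checkDestNotInSource(Arrays.asList("logs-2019.09", "logs-2019.10"), "logs-pivot"); // passes
        checkDestNotInSource(Arrays.asList("logs-2019.09"), "logs-2019.09");               // throws
    }
}
```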
* @@ -49,7 +49,7 @@ public final class SourceDestValidator { * @param indexNameExpressionResolver A valid IndexNameExpressionResolver object * @throws ElasticsearchStatusException when a validation fails */ - public static void validate(DataFrameTransformConfig config, + public static void validate(TransformConfig config, ClusterState clusterState, IndexNameExpressionResolver indexNameExpressionResolver, boolean shouldDefer) { @@ -69,7 +69,7 @@ public final class SourceDestValidator { } @Override - public void validate(DataFrameTransformConfig config, + public void validate(TransformConfig config, ClusterState clusterState, IndexNameExpressionResolver indexNameExpressionResolver) { for(String src : config.getSource().getIndex()) { @@ -78,7 +78,7 @@ public final class SourceDestValidator { src); if (concreteNames.length == 0) { throw new ElasticsearchStatusException( - DataFrameMessages.getMessage(DataFrameMessages.REST_PUT_DATA_FRAME_SOURCE_INDEX_MISSING, src), + TransformMessages.getMessage(TransformMessages.REST_PUT_TRANSFORM_SOURCE_INDEX_MISSING, src), RestStatus.BAD_REQUEST); } } @@ -93,7 +93,7 @@ public final class SourceDestValidator { } @Override - public void validate(DataFrameTransformConfig config, + public void validate(TransformConfig config, ClusterState clusterState, IndexNameExpressionResolver indexNameExpressionResolver) { final String destIndex = config.getDestination().getIndex(); @@ -104,7 +104,7 @@ public final class SourceDestValidator { src); if (Regex.simpleMatch(src, destIndex)) { throw new ElasticsearchStatusException( - DataFrameMessages.getMessage(DataFrameMessages.REST_PUT_DATA_FRAME_DEST_IN_SOURCE, destIndex, src), + TransformMessages.getMessage(TransformMessages.REST_PUT_TRANSFORM_DEST_IN_SOURCE, destIndex, src), RestStatus.BAD_REQUEST); } concreteSourceIndexNames.addAll(Arrays.asList(concreteNames)); @@ -112,7 +112,7 @@ public final class SourceDestValidator { if (concreteSourceIndexNames.contains(destIndex)) { throw new ElasticsearchStatusException( - DataFrameMessages.getMessage(DataFrameMessages.REST_PUT_DATA_FRAME_DEST_IN_SOURCE, + TransformMessages.getMessage(TransformMessages.REST_PUT_TRANSFORM_DEST_IN_SOURCE, destIndex, Strings.arrayToCommaDelimitedString(config.getSource().getIndex())), RestStatus.BAD_REQUEST @@ -124,7 +124,7 @@ public final class SourceDestValidator { destIndex); if (concreteDest.length > 0 && concreteSourceIndexNames.contains(concreteDest[0])) { throw new ElasticsearchStatusException( - DataFrameMessages.getMessage(DataFrameMessages.REST_PUT_DATA_FRAME_DEST_IN_SOURCE, + TransformMessages.getMessage(TransformMessages.REST_PUT_TRANSFORM_DEST_IN_SOURCE, concreteDest[0], Strings.arrayToCommaDelimitedString(concreteSourceIndexNames.toArray(new String[0]))), RestStatus.BAD_REQUEST @@ -141,7 +141,7 @@ public final class SourceDestValidator { } @Override - public void validate(DataFrameTransformConfig config, + public void validate(TransformConfig config, ClusterState clusterState, IndexNameExpressionResolver indexNameExpressionResolver) { final String destIndex = config.getDestination().getIndex(); @@ -150,7 +150,7 @@ public final class SourceDestValidator { if (concreteDest.length > 1) { throw new ElasticsearchStatusException( - DataFrameMessages.getMessage(DataFrameMessages.REST_PUT_DATA_FRAME_DEST_SINGLE_INDEX, destIndex), + TransformMessages.getMessage(TransformMessages.REST_PUT_TRANSFORM_DEST_SINGLE_INDEX, destIndex), RestStatus.BAD_REQUEST ); } diff --git 
a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameIndexer.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformIndexer.java similarity index 83% rename from x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameIndexer.java rename to x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformIndexer.java index bb03df373cd..eef481213b3 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameIndexer.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformIndexer.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.dataframe.transforms; +package org.elasticsearch.xpack.transform.transforms; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -24,19 +24,19 @@ import org.elasticsearch.search.aggregations.Aggregations; import org.elasticsearch.search.aggregations.bucket.composite.CompositeAggregation; import org.elasticsearch.search.aggregations.bucket.composite.CompositeAggregationBuilder; import org.elasticsearch.search.builder.SearchSourceBuilder; -import org.elasticsearch.xpack.core.dataframe.DataFrameField; -import org.elasticsearch.xpack.core.dataframe.DataFrameMessages; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerPosition; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerTransformStats; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpoint; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformProgress; -import org.elasticsearch.xpack.core.dataframe.utils.ExceptionsHelper; import org.elasticsearch.xpack.core.indexing.AsyncTwoPhaseIndexer; import org.elasticsearch.xpack.core.indexing.IndexerState; import org.elasticsearch.xpack.core.indexing.IterationResult; -import org.elasticsearch.xpack.dataframe.notifications.DataFrameAuditor; -import org.elasticsearch.xpack.dataframe.transforms.pivot.Pivot; +import org.elasticsearch.xpack.core.transform.TransformField; +import org.elasticsearch.xpack.core.transform.TransformMessages; +import org.elasticsearch.xpack.core.transform.transforms.TransformIndexerPosition; +import org.elasticsearch.xpack.core.transform.transforms.TransformIndexerStats; +import org.elasticsearch.xpack.core.transform.transforms.TransformCheckpoint; +import org.elasticsearch.xpack.core.transform.transforms.TransformConfig; +import org.elasticsearch.xpack.core.transform.transforms.TransformProgress; +import org.elasticsearch.xpack.core.transform.utils.ExceptionsHelper; +import org.elasticsearch.xpack.transform.notifications.TransformAuditor; +import org.elasticsearch.xpack.transform.transforms.pivot.Pivot; import java.io.IOException; import java.io.UncheckedIOException; @@ -51,7 +51,7 @@ import java.util.stream.Stream; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -public abstract class DataFrameIndexer extends AsyncTwoPhaseIndexer { +public abstract class TransformIndexer extends AsyncTwoPhaseIndexer { /** * RunState is an internal (non-persisted) state that controls the internal logic @@ -70,19 +70,19 @@ public abstract class DataFrameIndexer extends AsyncTwoPhaseIndexer fieldMappings; 
private Pivot pivot; private int pageSize = 0; - protected volatile DataFrameTransformCheckpoint lastCheckpoint; - protected volatile DataFrameTransformCheckpoint nextCheckpoint; + protected volatile TransformCheckpoint lastCheckpoint; + protected volatile TransformCheckpoint nextCheckpoint; private volatile RunState runState; @@ -90,16 +90,16 @@ public abstract class DataFrameIndexer extends AsyncTwoPhaseIndexer> changedBuckets; private volatile Map changedBucketsAfterKey; - public DataFrameIndexer(Executor executor, - DataFrameAuditor auditor, - DataFrameTransformConfig transformConfig, + public TransformIndexer(Executor executor, + TransformAuditor auditor, + TransformConfig transformConfig, Map fieldMappings, AtomicReference initialState, - DataFrameIndexerPosition initialPosition, - DataFrameIndexerTransformStats jobStats, - DataFrameTransformProgress transformProgress, - DataFrameTransformCheckpoint lastCheckpoint, - DataFrameTransformCheckpoint nextCheckpoint) { + TransformIndexerPosition initialPosition, + TransformIndexerStats jobStats, + TransformProgress transformProgress, + TransformCheckpoint lastCheckpoint, + TransformCheckpoint nextCheckpoint) { super(executor, initialState, initialPosition, jobStats); this.auditor = Objects.requireNonNull(auditor); this.transformConfig = ExceptionsHelper.requireNonNull(transformConfig, "transformConfig"); @@ -122,7 +122,7 @@ public abstract class DataFrameIndexer extends AsyncTwoPhaseIndexer listener); + protected abstract void createCheckpoint(ActionListener listener); @Override protected void onStart(long now, ActionListener listener) { @@ -181,7 +181,7 @@ public abstract class DataFrameIndexer extends AsyncTwoPhaseIndexer doProcess(SearchResponse searchResponse) { + protected IterationResult doProcess(SearchResponse searchResponse) { final Aggregations aggregations = searchResponse.getAggregations(); // Treat this as a "we reached the end". // This should only happen when all underlying indices have gone away. Consequently, there is no more data to read. 
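The indexer walks a composite aggregation page by page: the after-key of the last bucket becomes the next position, and an empty page signals the end of the run. A self-contained sketch of that loop follows, using hypothetical types rather than the AsyncTwoPhaseIndexer / IterationResult API:

```java
import java.util.ArrayList;
import java.util.List;
import java.util.Map;

public class CompositePagingSketch {

    /** One page of composite buckets plus the after-key needed to request the next page. */
    static class Page {
        final List<Map<String, Object>> buckets;
        final Map<String, Object> afterKey;

        Page(List<Map<String, Object>> buckets, Map<String, Object> afterKey) {
            this.buckets = buckets;
            this.afterKey = afterKey;
        }
    }

    interface Source {
        Page fetch(Map<String, Object> position); // null position requests the first page
    }

    /** Drains all pages, analogous to repeated doProcess calls driven by the indexer. */
    static List<Map<String, Object>> drain(Source source) {
        List<Map<String, Object>> all = new ArrayList<>();
        Map<String, Object> position = null;
        while (true) {
            Page page = source.fetch(position);
            if (page.buckets.isEmpty()) {
                return all;             // empty page: "we reached the end"
            }
            all.addAll(page.buckets);
            position = page.afterKey;   // after-key of the last bucket becomes the new position
        }
    }
}
```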
@@ -212,7 +212,7 @@ public abstract class DataFrameIndexer extends AsyncTwoPhaseIndexer processBuckets(final CompositeAggregation agg) { + private IterationResult processBuckets(final CompositeAggregation agg) { // we reached the end if (agg.getBuckets().isEmpty()) { return new IterationResult<>(Collections.emptyList(), null, true); @@ -220,11 +220,11 @@ public abstract class DataFrameIndexer extends AsyncTwoPhaseIndexer result = new IterationResult<>( + IterationResult result = new IterationResult<>( processBucketsToIndexRequests(agg).collect(Collectors.toList()), newPosition, agg.getBuckets().isEmpty()); @@ -238,7 +238,7 @@ public abstract class DataFrameIndexer extends AsyncTwoPhaseIndexer processPartialBucketUpdates(final CompositeAggregation agg) { + private IterationResult processPartialBucketUpdates(final CompositeAggregation agg) { // we reached the end if (agg.getBuckets().isEmpty()) { // cleanup changed Buckets @@ -248,14 +248,14 @@ public abstract class DataFrameIndexer extends AsyncTwoPhaseIndexer(Collections.emptyList(), - new DataFrameIndexerPosition(null, changedBucketsAfterKey), false); + new TransformIndexerPosition(null, changedBucketsAfterKey), false); } return processBuckets(agg); } - private IterationResult processChangedBuckets(final CompositeAggregation agg) { + private IterationResult processChangedBuckets(final CompositeAggregation agg) { // initialize the map of changed buckets, the map might be empty if source do not require/implement // changed bucket detection changedBuckets = pivot.initialIncrementalBucketUpdateMap(); @@ -294,11 +294,11 @@ public abstract class DataFrameIndexer extends AsyncTwoPhaseIndexer processBucketsToIndexRequests(CompositeAggregation agg) { - final DataFrameTransformConfig transformConfig = getConfig(); + final TransformConfig transformConfig = getConfig(); String indexName = transformConfig.getDestination().getIndex(); return pivot.extractResults(agg, getFieldMappings(), getStats()).map(document -> { - String id = (String) document.get(DataFrameField.DOCUMENT_ID_FIELD); + String id = (String) document.get(TransformField.DOCUMENT_ID_FIELD); if (id == null) { throw new RuntimeException("Expected a document id but got null."); @@ -332,7 +332,7 @@ public abstract class DataFrameIndexer extends AsyncTwoPhaseIndexer { +public class TransformPersistentTasksExecutor extends PersistentTasksExecutor { - private static final Logger logger = LogManager.getLogger(DataFrameTransformPersistentTasksExecutor.class); + private static final Logger logger = LogManager.getLogger(TransformPersistentTasksExecutor.class); // The amount of time we wait for the cluster state to respond when being marked as failed private static final int MARK_AS_FAILED_TIMEOUT_SEC = 90; private final Client client; - private final DataFrameTransformsConfigManager transformsConfigManager; - private final DataFrameTransformsCheckpointService dataFrameTransformsCheckpointService; + private final TransformConfigManager transformsConfigManager; + private final TransformCheckpointService transformCheckpointService; private final SchedulerEngine schedulerEngine; private final ThreadPool threadPool; - private final DataFrameAuditor auditor; + private final ClusterService clusterService; + private final TransformAuditor auditor; private volatile int numFailureRetries; - public DataFrameTransformPersistentTasksExecutor(Client client, - DataFrameTransformsConfigManager transformsConfigManager, - DataFrameTransformsCheckpointService dataFrameTransformsCheckpointService, - SchedulerEngine 
schedulerEngine, - DataFrameAuditor auditor, - ThreadPool threadPool, - ClusterService clusterService, - Settings settings) { - super(DataFrameField.TASK_NAME, DataFrame.TASK_THREAD_POOL_NAME); + public TransformPersistentTasksExecutor(Client client, + TransformConfigManager transformsConfigManager, + TransformCheckpointService transformsCheckpointService, + SchedulerEngine schedulerEngine, + TransformAuditor auditor, + ThreadPool threadPool, + ClusterService clusterService, + Settings settings) { + super(TransformField.TASK_NAME, Transform.TASK_THREAD_POOL_NAME); this.client = client; this.transformsConfigManager = transformsConfigManager; - this.dataFrameTransformsCheckpointService = dataFrameTransformsCheckpointService; + this.transformCheckpointService = transformsCheckpointService; this.schedulerEngine = schedulerEngine; this.auditor = auditor; this.threadPool = threadPool; - this.numFailureRetries = DataFrameTransformTask.NUM_FAILURE_RETRIES_SETTING.get(settings); + this.clusterService = clusterService; + this.numFailureRetries = TransformTask.NUM_FAILURE_RETRIES_SETTING.get(settings); clusterService.getClusterSettings() - .addSettingsUpdateConsumer(DataFrameTransformTask.NUM_FAILURE_RETRIES_SETTING, this::setNumFailureRetries); + .addSettingsUpdateConsumer(TransformTask.NUM_FAILURE_RETRIES_SETTING, this::setNumFailureRetries); } @Override - public PersistentTasksCustomMetaData.Assignment getAssignment(DataFrameTransform params, ClusterState clusterState) { + public PersistentTasksCustomMetaData.Assignment getAssignment(TransformTaskParams params, ClusterState clusterState) { List unavailableIndices = verifyIndicesPrimaryShardsAreActive(clusterState); if (unavailableIndices.size() != 0) { - String reason = "Not starting data frame transform [" + params.getId() + "], " + + String reason = "Not starting transform [" + params.getId() + "], " + "because not all primary shards are active for the following indices [" + String.join(",", unavailableIndices) + "]"; logger.debug(reason); @@ -106,7 +108,7 @@ public class DataFrameTransformPersistentTasksExecutor extends PersistentTasksEx IndexNameExpressionResolver resolver = new IndexNameExpressionResolver(); String[] indices = resolver.concreteIndexNames(clusterState, IndicesOptions.lenientExpandOpen(), - DataFrameInternalIndex.INDEX_NAME_PATTERN); + TransformInternalIndex.INDEX_NAME_PATTERN); List unavailableIndices = new ArrayList<>(indices.length); for (String index : indices) { IndexRoutingTable routingTable = clusterState.getRoutingTable().index(index); @@ -118,34 +120,38 @@ public class DataFrameTransformPersistentTasksExecutor extends PersistentTasksEx } @Override - protected void nodeOperation(AllocatedPersistentTask task, @Nullable DataFrameTransform params, PersistentTaskState state) { + protected void nodeOperation(AllocatedPersistentTask task, @Nullable TransformTaskParams params, PersistentTaskState state) { final String transformId = params.getId(); - final DataFrameTransformTask buildTask = (DataFrameTransformTask) task; - // NOTE: DataFrameTransformPersistentTasksExecutor#createTask pulls in the stored task state from the ClusterState when the object - // is created. DataFrameTransformTask#ctor takes into account setting the task as failed if that is passed in with the + final TransformTask buildTask = (TransformTask) task; + // NOTE: TransformPersistentTasksExecutor#createTask pulls in the stored task state from the ClusterState when the object + // is created. 
TransformTask#ctor takes into account setting the task as failed if that is passed in with the // persisted state. - // DataFrameTransformPersistentTasksExecutor#startTask will fail as DataFrameTransformTask#start, when force == false, will return + // TransformPersistentTasksExecutor#startTask will fail as TransformTask#start, when force == false, will return // a failure indicating that a failed task cannot be started. // // We want the rest of the state to be populated in the task when it is loaded on the node so that users can force start it again // later if they want. - final ClientDataFrameIndexerBuilder indexerBuilder = - new ClientDataFrameIndexerBuilder() + final ClientTransformIndexerBuilder indexerBuilder = + new ClientTransformIndexerBuilder() .setAuditor(auditor) .setClient(client) - .setTransformsCheckpointService(dataFrameTransformsCheckpointService) + .setTransformsCheckpointService(transformCheckpointService) .setTransformsConfigManager(transformsConfigManager); - final SetOnce stateHolder = new SetOnce<>(); + final SetOnce stateHolder = new SetOnce<>(); - ActionListener startTaskListener = ActionListener.wrap( - response -> logger.info("Successfully completed and scheduled task in node operation"), - failure -> logger.error("Failed to start task ["+ transformId +"] in node operation", failure) + ActionListener startTaskListener = ActionListener.wrap( + response -> logger.info("[{}] successfully completed and scheduled task in node operation", transformId), + failure -> { + auditor.error(transformId, "Failed to start transform. " + + "Please stop and attempt to start again. Failure: " + failure.getMessage()); + logger.error("Failed to start task ["+ transformId +"] in node operation", failure); + } ); - // <5> load next checkpoint - ActionListener getTransformNextCheckpointListener = ActionListener.wrap( + // <7> load next checkpoint + ActionListener getTransformNextCheckpointListener = ActionListener.wrap( nextCheckpoint -> { if (nextCheckpoint.isEmpty()) { @@ -165,14 +171,14 @@ public class DataFrameTransformPersistentTasksExecutor extends PersistentTasksEx }, error -> { // TODO: do not use the same error message as for loading the last checkpoint - String msg = DataFrameMessages.getMessage(DataFrameMessages.FAILED_TO_LOAD_TRANSFORM_CHECKPOINT, transformId); + String msg = TransformMessages.getMessage(TransformMessages.FAILED_TO_LOAD_TRANSFORM_CHECKPOINT, transformId); logger.error(msg, error); markAsFailed(buildTask, msg); } ); - // <4> load last checkpoint - ActionListener getTransformLastCheckpointListener = ActionListener.wrap( + // <6> load last checkpoint + ActionListener getTransformLastCheckpointListener = ActionListener.wrap( lastCheckpoint -> { indexerBuilder.setLastCheckpoint(lastCheckpoint); @@ -182,18 +188,18 @@ public class DataFrameTransformPersistentTasksExecutor extends PersistentTasksEx getTransformNextCheckpointListener); }, error -> { - String msg = DataFrameMessages.getMessage(DataFrameMessages.FAILED_TO_LOAD_TRANSFORM_CHECKPOINT, transformId); + String msg = TransformMessages.getMessage(TransformMessages.FAILED_TO_LOAD_TRANSFORM_CHECKPOINT, transformId); logger.error(msg, error); markAsFailed(buildTask, msg); } ); - // <3> Set the previous stats (if they exist), initialize the indexer, start the task (If it is STOPPED) + // <5> Set the previous stats (if they exist), initialize the indexer, start the task (If it is STOPPED) // Since we don't create the task until `_start` is called, if we see that the task state is stopped, attempt to start // 
Schedule execution regardless - ActionListener> transformStatsActionListener = ActionListener.wrap( + ActionListener> transformStatsActionListener = ActionListener.wrap( stateAndStatsAndSeqNoPrimaryTermAndIndex -> { - DataFrameTransformStoredDoc stateAndStats = stateAndStatsAndSeqNoPrimaryTermAndIndex.v1(); + TransformStoredDoc stateAndStats = stateAndStatsAndSeqNoPrimaryTermAndIndex.v1(); SeqNoPrimaryTermAndIndex seqNoPrimaryTermAndIndex = stateAndStatsAndSeqNoPrimaryTermAndIndex.v2(); // Since we have not set the value for this yet, it SHOULD be null buildTask.updateSeqNoPrimaryTermAndIndex(null, seqNoPrimaryTermAndIndex); @@ -220,7 +226,7 @@ public class DataFrameTransformPersistentTasksExecutor extends PersistentTasksEx }, error -> { if (error instanceof ResourceNotFoundException == false) { - String msg = DataFrameMessages.getMessage(DataFrameMessages.FAILED_TO_LOAD_TRANSFORM_STATE, transformId); + String msg = TransformMessages.getMessage(TransformMessages.FAILED_TO_LOAD_TRANSFORM_STATE, transformId); logger.error(msg, error); markAsFailed(buildTask, msg); } else { @@ -230,42 +236,53 @@ public class DataFrameTransformPersistentTasksExecutor extends PersistentTasksEx } ); - // <2> set fieldmappings for the indexer, get the previous stats (if they exist) + // <4> set fieldmappings for the indexer, get the previous stats (if they exist) ActionListener> getFieldMappingsListener = ActionListener.wrap( fieldMappings -> { indexerBuilder.setFieldMappings(fieldMappings); transformsConfigManager.getTransformStoredDoc(transformId, transformStatsActionListener); }, error -> { - String msg = DataFrameMessages.getMessage(DataFrameMessages.DATA_FRAME_UNABLE_TO_GATHER_FIELD_MAPPINGS, + String msg = TransformMessages.getMessage(TransformMessages.UNABLE_TO_GATHER_FIELD_MAPPINGS, indexerBuilder.getTransformConfig().getDestination().getIndex()); logger.error(msg, error); markAsFailed(buildTask, msg); } ); - // <1> Validate the transform, assigning it to the indexer, and get the field mappings - ActionListener getTransformConfigListener = ActionListener.wrap( + // <3> Validate the transform, assigning it to the indexer, and get the field mappings + ActionListener getTransformConfigListener = ActionListener.wrap( config -> { if (config.isValid()) { indexerBuilder.setTransformConfig(config); SchemaUtil.getDestinationFieldMappings(client, config.getDestination().getIndex(), getFieldMappingsListener); } else { markAsFailed(buildTask, - DataFrameMessages.getMessage(DataFrameMessages.DATA_FRAME_TRANSFORM_CONFIGURATION_INVALID, transformId)); + TransformMessages.getMessage(TransformMessages.TRANSFORM_CONFIGURATION_INVALID, transformId)); } }, error -> { - String msg = DataFrameMessages.getMessage(DataFrameMessages.FAILED_TO_LOAD_TRANSFORM_CONFIGURATION, transformId); + String msg = TransformMessages.getMessage(TransformMessages.FAILED_TO_LOAD_TRANSFORM_CONFIGURATION, transformId); logger.error(msg, error); markAsFailed(buildTask, msg); } ); - // <0> Get the transform config - transformsConfigManager.getTransformConfiguration(transformId, getTransformConfigListener); + + // <2> Get the transform config + ActionListener templateCheckListener = ActionListener.wrap( + aVoid -> transformsConfigManager.getTransformConfiguration(transformId, getTransformConfigListener), + error -> { + String msg = "Failed to create internal index mappings"; + logger.error(msg, error); + markAsFailed(buildTask, msg); + } + ); + + // <1> Check the internal index template is installed + 
TransformInternalIndex.installLatestVersionedIndexTemplateIfRequired(clusterService, client, templateCheckListener); } - private static IndexerState currentIndexerState(DataFrameTransformState previousState) { + private static IndexerState currentIndexerState(TransformState previousState) { if (previousState == null) { return IndexerState.STOPPED; } @@ -285,7 +302,7 @@ public class DataFrameTransformPersistentTasksExecutor extends PersistentTasksEx } } - private void markAsFailed(DataFrameTransformTask task, String reason) { + private void markAsFailed(TransformTask task, String reason) { CountDownLatch latch = new CountDownLatch(1); task.markAsFailed(reason, new LatchedActionListener<>(ActionListener.wrap( @@ -299,14 +316,13 @@ public class DataFrameTransformPersistentTasksExecutor extends PersistentTasksEx } } - private void startTask(DataFrameTransformTask buildTask, - ClientDataFrameIndexerBuilder indexerBuilder, + private void startTask(TransformTask buildTask, + ClientTransformIndexerBuilder indexerBuilder, Long previousCheckpoint, - ActionListener listener) { + ActionListener listener) { buildTask.initializeIndexer(indexerBuilder); - // DataFrameTransformTask#start will fail if the task state is FAILED - // Will continue to attempt to start the indexer, even if the state is STARTED - buildTask.setNumFailureRetries(numFailureRetries).start(previousCheckpoint, false, false, listener); + // TransformTask#start will fail if the task state is FAILED + buildTask.setNumFailureRetries(numFailureRetries).start(previousCheckpoint, listener); } private void setNumFailureRetries(int numFailureRetries) { @@ -315,8 +331,8 @@ public class DataFrameTransformPersistentTasksExecutor extends PersistentTasksEx @Override protected AllocatedPersistentTask createTask(long id, String type, String action, TaskId parentTaskId, - PersistentTasksCustomMetaData.PersistentTask persistentTask, Map headers) { - return new DataFrameTransformTask(id, type, action, parentTaskId, persistentTask.getParams(), - (DataFrameTransformState) persistentTask.getState(), schedulerEngine, auditor, threadPool, headers); + PersistentTasksCustomMetaData.PersistentTask persistentTask, Map headers) { + return new TransformTask(id, type, action, parentTaskId, persistentTask.getParams(), + (TransformState) persistentTask.getState(), schedulerEngine, auditor, threadPool, headers); } } diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/TransformProgressGatherer.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformProgressGatherer.java similarity index 75% rename from x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/TransformProgressGatherer.java rename to x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformProgressGatherer.java index a7c37f7c2e4..93bf7ca3d01 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/TransformProgressGatherer.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformProgressGatherer.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.dataframe.transforms; +package org.elasticsearch.xpack.transform.transforms; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.search.SearchAction; @@ -16,8 +16,8 @@ import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.xpack.core.ClientHelper; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformProgress; +import org.elasticsearch.xpack.core.transform.transforms.TransformConfig; +import org.elasticsearch.xpack.core.transform.transforms.TransformProgress; import java.util.function.Function; @@ -36,23 +36,23 @@ public final class TransformProgressGatherer { */ public static void getInitialProgress(Client client, QueryBuilder filterQuery, - DataFrameTransformConfig config, - ActionListener progressListener) { + TransformConfig config, + ActionListener progressListener) { SearchRequest request = getSearchRequest(config, filterQuery); ActionListener searchResponseActionListener = ActionListener.wrap( - searchResponse -> progressListener.onResponse(searchResponseToDataFrameTransformProgressFunction().apply(searchResponse)), + searchResponse -> progressListener.onResponse(searchResponseToTransformProgressFunction().apply(searchResponse)), progressListener::onFailure ); ClientHelper.executeWithHeadersAsync(config.getHeaders(), - ClientHelper.DATA_FRAME_ORIGIN, + ClientHelper.TRANSFORM_ORIGIN, client, SearchAction.INSTANCE, request, searchResponseActionListener); } - public static SearchRequest getSearchRequest(DataFrameTransformConfig config, QueryBuilder filteredQuery) { + public static SearchRequest getSearchRequest(TransformConfig config, QueryBuilder filteredQuery) { SearchRequest request = new SearchRequest(config.getSource().getIndex()); request.allowPartialSearchResults(false); BoolQueryBuilder existsClauses = QueryBuilders.boolQuery(); @@ -72,7 +72,7 @@ public final class TransformProgressGatherer { return request; } - public static Function searchResponseToDataFrameTransformProgressFunction() { - return searchResponse -> new DataFrameTransformProgress(searchResponse.getHits().getTotalHits().value, 0L, 0L); + public static Function searchResponseToTransformProgressFunction() { + return searchResponse -> new TransformProgress(searchResponse.getHits().getTotalHits().value, 0L, 0L); } } diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformTask.java similarity index 71% rename from x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java rename to x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformTask.java index a5d0bfd2ff9..75054e949bf 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformTask.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.dataframe.transforms; +package org.elasticsearch.xpack.transform.transforms; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -21,79 +21,78 @@ import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.xpack.core.dataframe.DataFrameField; -import org.elasticsearch.xpack.core.dataframe.DataFrameMessages; -import org.elasticsearch.xpack.core.dataframe.action.StartDataFrameTransformTaskAction; -import org.elasticsearch.xpack.core.dataframe.action.StartDataFrameTransformTaskAction.Response; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerPosition; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerTransformStats; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransform; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpointingInfo; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformState; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformTaskState; import org.elasticsearch.xpack.core.indexing.IndexerState; import org.elasticsearch.xpack.core.scheduler.SchedulerEngine; import org.elasticsearch.xpack.core.scheduler.SchedulerEngine.Event; -import org.elasticsearch.xpack.dataframe.checkpoint.DataFrameTransformsCheckpointService; -import org.elasticsearch.xpack.dataframe.notifications.DataFrameAuditor; -import org.elasticsearch.xpack.dataframe.persistence.SeqNoPrimaryTermAndIndex; +import org.elasticsearch.xpack.core.transform.TransformField; +import org.elasticsearch.xpack.core.transform.TransformMessages; +import org.elasticsearch.xpack.core.transform.action.StartTransformAction; +import org.elasticsearch.xpack.core.transform.transforms.TransformIndexerPosition; +import org.elasticsearch.xpack.core.transform.transforms.TransformIndexerStats; +import org.elasticsearch.xpack.core.transform.transforms.TransformTaskParams; +import org.elasticsearch.xpack.core.transform.transforms.TransformCheckpointingInfo; +import org.elasticsearch.xpack.core.transform.transforms.TransformState; +import org.elasticsearch.xpack.core.transform.transforms.TransformTaskState; +import org.elasticsearch.xpack.transform.checkpoint.TransformCheckpointService; +import org.elasticsearch.xpack.transform.notifications.TransformAuditor; +import org.elasticsearch.xpack.transform.persistence.SeqNoPrimaryTermAndIndex; import java.util.Arrays; import java.util.Map; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; -import static org.elasticsearch.xpack.core.dataframe.DataFrameMessages.DATA_FRAME_CANNOT_START_FAILED_TRANSFORM; -import static org.elasticsearch.xpack.core.dataframe.DataFrameMessages.DATA_FRAME_CANNOT_STOP_FAILED_TRANSFORM; +import static org.elasticsearch.xpack.core.transform.TransformMessages.CANNOT_START_FAILED_TRANSFORM; +import static org.elasticsearch.xpack.core.transform.TransformMessages.CANNOT_STOP_FAILED_TRANSFORM; -public class DataFrameTransformTask extends AllocatedPersistentTask implements SchedulerEngine.Listener { +public class TransformTask extends AllocatedPersistentTask implements SchedulerEngine.Listener { // Default interval the scheduler sends an event if the config does not specify a frequency private static final long SCHEDULER_NEXT_MILLISECONDS = 60000; - private 
static final Logger logger = LogManager.getLogger(DataFrameTransformTask.class); + private static final Logger logger = LogManager.getLogger(TransformTask.class); private static final int DEFAULT_FAILURE_RETRIES = 10; private volatile int numFailureRetries = DEFAULT_FAILURE_RETRIES; // How many times the transform task can retry on an non-critical failure public static final Setting NUM_FAILURE_RETRIES_SETTING = Setting.intSetting( - "xpack.data_frame.num_transform_failure_retries", + "xpack.transform.num_transform_failure_retries", DEFAULT_FAILURE_RETRIES, 0, 100, Setting.Property.NodeScope, Setting.Property.Dynamic); private static final IndexerState[] RUNNING_STATES = new IndexerState[]{IndexerState.STARTED, IndexerState.INDEXING}; - public static final String SCHEDULE_NAME = DataFrameField.TASK_NAME + "/schedule"; + public static final String SCHEDULE_NAME = TransformField.TASK_NAME + "/schedule"; - private final DataFrameTransform transform; + private final TransformTaskParams transform; private final SchedulerEngine schedulerEngine; private final ThreadPool threadPool; - private final DataFrameAuditor auditor; - private final DataFrameIndexerPosition initialPosition; + private final TransformAuditor auditor; + private final TransformIndexerPosition initialPosition; private final IndexerState initialIndexerState; - private final SetOnce indexer = new SetOnce<>(); + private final SetOnce indexer = new SetOnce<>(); - private final AtomicReference taskState; + private final AtomicReference taskState; private final AtomicReference stateReason; private final AtomicReference seqNoPrimaryTermAndIndex = new AtomicReference<>(null); - // the checkpoint of this data frame, storing the checkpoint until data indexing from source to dest is _complete_ + // the checkpoint of this transform, storing the checkpoint until data indexing from source to dest is _complete_ // Note: Each indexer run creates a new future checkpoint which becomes the current checkpoint only after the indexer run finished private final AtomicLong currentCheckpoint; - public DataFrameTransformTask(long id, String type, String action, TaskId parentTask, DataFrameTransform transform, - DataFrameTransformState state, SchedulerEngine schedulerEngine, DataFrameAuditor auditor, + public TransformTask(long id, String type, String action, TaskId parentTask, TransformTaskParams transform, + TransformState state, SchedulerEngine schedulerEngine, TransformAuditor auditor, ThreadPool threadPool, Map headers) { - super(id, type, action, DataFrameField.PERSISTENT_TASK_DESCRIPTION_PREFIX + transform.getId(), parentTask, headers); + super(id, type, action, TransformField.PERSISTENT_TASK_DESCRIPTION_PREFIX + transform.getId(), parentTask, headers); this.transform = transform; this.schedulerEngine = schedulerEngine; this.threadPool = threadPool; this.auditor = auditor; IndexerState initialState = IndexerState.STOPPED; - DataFrameTransformTaskState initialTaskState = DataFrameTransformTaskState.STOPPED; + TransformTaskState initialTaskState = TransformTaskState.STOPPED; String initialReason = null; long initialGeneration = 0; - DataFrameIndexerPosition initialPosition = null; + TransformIndexerPosition initialPosition = null; if (state != null) { initialTaskState = state.getTaskState(); initialReason = state.getReason(); @@ -130,13 +129,13 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S return getState(); } - private ClientDataFrameIndexer getIndexer() { + private ClientTransformIndexer getIndexer() { return 
indexer.get(); } - public DataFrameTransformState getState() { + public TransformState getState() { if (getIndexer() == null) { - return new DataFrameTransformState( + return new TransformState( taskState.get(), initialIndexerState, initialPosition, @@ -144,7 +143,7 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S stateReason.get(), null); } else { - return new DataFrameTransformState( + return new TransformState( taskState.get(), indexer.get().getState(), indexer.get().getPosition(), @@ -154,9 +153,9 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S } } - public DataFrameIndexerTransformStats getStats() { + public TransformIndexerStats getStats() { if (getIndexer() == null) { - return new DataFrameIndexerTransformStats(); + return new TransformIndexerStats(); } else { return getIndexer().getStats(); } @@ -170,9 +169,9 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S return currentCheckpoint.getAndIncrement(); } - public void getCheckpointingInfo(DataFrameTransformsCheckpointService transformsCheckpointService, - ActionListener listener) { - ClientDataFrameIndexer indexer = getIndexer(); + public void getCheckpointingInfo(TransformCheckpointService transformsCheckpointService, + ActionListener listener) { + ClientTransformIndexer indexer = getIndexer(); if (indexer == null) { transformsCheckpointService.getCheckpointingInfo( transform.getId(), @@ -199,12 +198,19 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S )); } - // Here `failOnConflict` is usually true, except when the initial start is called when the task is assigned to the node - synchronized void start(Long startingCheckpoint, boolean force, boolean failOnConflict, ActionListener listener) { - logger.debug("[{}] start called with force [{}] and state [{}].", getTransformId(), force, getState()); - if (taskState.get() == DataFrameTransformTaskState.FAILED && force == false) { + /** + * Starts the transform and schedules it to be triggered in the future. + * + * NOTE: This should ONLY be called via {@link TransformPersistentTasksExecutor} + * + * @param startingCheckpoint The starting checkpoint, could null. Null indicates that there is no starting checkpoint + * @param listener The listener to alert once started + */ + synchronized void start(Long startingCheckpoint, ActionListener listener) { + logger.debug("[{}] start called with state [{}].", getTransformId(), getState()); + if (taskState.get() == TransformTaskState.FAILED) { listener.onFailure(new ElasticsearchStatusException( - DataFrameMessages.getMessage(DATA_FRAME_CANNOT_START_FAILED_TRANSFORM, + TransformMessages.getMessage(CANNOT_START_FAILED_TRANSFORM, getTransformId(), stateReason.get()), RestStatus.CONFLICT)); @@ -214,7 +220,7 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S // If our state is failed AND the indexer is null, the user needs to _stop?force=true so that the indexer gets // fully initialized. // If we are NOT failed, then we can assume that `start` was just called early in the process. - String msg = taskState.get() == DataFrameTransformTaskState.FAILED ? + String msg = taskState.get() == TransformTaskState.FAILED ? "It failed during the initialization process; force stop to allow reinitialization." : "Try again later."; listener.onFailure(new ElasticsearchStatusException("Task for transform [{}] not fully initialized. 
{}", @@ -223,70 +229,51 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S msg)); return; } - // If we are already in a `STARTED` state, we should not attempt to call `.start` on the indexer again. - if (taskState.get() == DataFrameTransformTaskState.STARTED && failOnConflict) { - listener.onFailure(new ElasticsearchStatusException( - "Cannot start transform [{}] as it is already STARTED.", - RestStatus.CONFLICT, - getTransformId() - )); - return; - } final IndexerState newState = getIndexer().start(); if (Arrays.stream(RUNNING_STATES).noneMatch(newState::equals)) { - listener.onFailure(new ElasticsearchException("Cannot start task for data frame transform [{}], because state was [{}]", + listener.onFailure(new ElasticsearchException("Cannot start task for transform [{}], because state was [{}]", transform.getId(), newState)); return; } stateReason.set(null); - taskState.set(DataFrameTransformTaskState.STARTED); + taskState.set(TransformTaskState.STARTED); if (startingCheckpoint != null) { currentCheckpoint.set(startingCheckpoint); } - final DataFrameTransformState state = new DataFrameTransformState( - DataFrameTransformTaskState.STARTED, + final TransformState state = new TransformState( + TransformTaskState.STARTED, IndexerState.STOPPED, getIndexer().getPosition(), currentCheckpoint.get(), null, getIndexer().getProgress()); - logger.info("[{}] updating state for data frame transform to [{}].", transform.getId(), state.toString()); - // Even though the indexer information is persisted to an index, we still need DataFrameTransformTaskState in the clusterstate + logger.info("[{}] updating state for transform to [{}].", transform.getId(), state.toString()); + // Even though the indexer information is persisted to an index, we still need TransformTaskState in the clusterstate // This keeps track of STARTED, FAILED, STOPPED // This is because a FAILED state can occur because we cannot read the config from the internal index, which would imply that // we could not read the previous state information from said index. persistStateToClusterState(state, ActionListener.wrap( task -> { auditor.info(transform.getId(), - "Updated data frame transform state to [" + state.getTaskState() + "]."); + "Updated transform state to [" + state.getTaskState() + "]."); long now = System.currentTimeMillis(); // kick off the indexer triggered(new Event(schedulerJobName(), now, now)); registerWithSchedulerJob(); - listener.onResponse(new StartDataFrameTransformTaskAction.Response(true)); + listener.onResponse(new StartTransformAction.Response(true)); }, exc -> { auditor.warning(transform.getId(), "Failed to persist to cluster state while marking task as started. Failure: " + exc.getMessage()); logger.error(new ParameterizedMessage("[{}] failed updating state to [{}].", getTransformId(), state), exc); getIndexer().stop(); - listener.onFailure(new ElasticsearchException("Error while updating state for data frame transform [" + listener.onFailure(new ElasticsearchException("Error while updating state for transform [" + transform.getId() + "] to [" + state.getIndexerState() + "].", exc)); } )); } - /** - * Start the background indexer and set the task's state to started - * @param startingCheckpoint Set the current checkpoint to this value. 
If null the - * current checkpoint is not set - * @param force Whether to force start a failed task or not - * @param listener Started listener - */ - public synchronized void start(Long startingCheckpoint, boolean force, ActionListener listener) { - start(startingCheckpoint, force, true, listener); - } public synchronized void stop(boolean force) { logger.debug("[{}] stop called with force [{}] and state [{}]", getTransformId(), force, getState()); @@ -301,9 +288,9 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S return; } - if (taskState.get() == DataFrameTransformTaskState.FAILED && force == false) { + if (taskState.get() == TransformTaskState.FAILED && force == false) { throw new ElasticsearchStatusException( - DataFrameMessages.getMessage(DATA_FRAME_CANNOT_STOP_FAILED_TRANSFORM, + TransformMessages.getMessage(CANNOT_STOP_FAILED_TRANSFORM, getTransformId(), stateReason.get()), RestStatus.CONFLICT); @@ -315,7 +302,7 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S // Since we have called `stop` against the indexer, we have no more fear of triggering again. // But, since `doSaveState` is asynchronous, it is best to set the state as STARTED so that another `start` call cannot be // executed while we are wrapping up. - taskState.compareAndSet(DataFrameTransformTaskState.FAILED, DataFrameTransformTaskState.STARTED); + taskState.compareAndSet(TransformTaskState.FAILED, TransformTaskState.STARTED); if (state == IndexerState.STOPPED) { getIndexer().onStop(); getIndexer().doSaveState(state, getIndexer().getPosition(), () -> {}); @@ -330,11 +317,11 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S } if (getIndexer() == null) { - logger.warn("[{}] data frame task triggered with an unintialized indexer.", getTransformId()); + logger.warn("[{}] transform task triggered with an unintialized indexer.", getTransformId()); return; } - if (taskState.get() == DataFrameTransformTaskState.FAILED || taskState.get() == DataFrameTransformTaskState.STOPPED) { + if (taskState.get() == TransformTaskState.FAILED || taskState.get() == TransformTaskState.STOPPED) { logger.debug("[{}] schedule was triggered for transform but task is [{}]. Ignoring trigger.", getTransformId(), taskState.get()); @@ -350,7 +337,7 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S return; } - logger.debug("[{}] data frame indexer schedule has triggered, state: [{}].", event.getJobName(), indexerState); + logger.debug("[{}] transform indexer schedule has triggered, state: [{}].", event.getJobName(), indexerState); // if it runs for the 1st time we just do it, if not we check for changes if (currentCheckpoint.get() == 0) { @@ -362,7 +349,7 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S } /** - * Attempt to gracefully cleanup the data frame transform so it can be terminated. + * Attempt to gracefully cleanup the transform so it can be terminated. 
* This tries to remove the job from the scheduler and completes the persistent task */ synchronized void shutdown() { @@ -370,15 +357,15 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S markAsCompleted(); } - void persistStateToClusterState(DataFrameTransformState state, + void persistStateToClusterState(TransformState state, ActionListener> listener) { updatePersistentTaskState(state, ActionListener.wrap( success -> { - logger.debug("[{}] successfully updated state for data frame transform to [{}].", transform.getId(), state.toString()); + logger.debug("[{}] successfully updated state for transform to [{}].", transform.getId(), state.toString()); listener.onResponse(success); }, failure -> { - logger.error(new ParameterizedMessage("[{}] failed to update cluster state for data frame transform.", + logger.error(new ParameterizedMessage("[{}] failed to update cluster state for transform.", transform.getId()), failure); listener.onFailure(failure); @@ -389,13 +376,13 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S synchronized void markAsFailed(String reason, ActionListener listener) { // If we are already flagged as failed, this probably means that a second trigger started firing while we were attempting to // flag the previously triggered indexer as failed. Exit early as we are already flagged as failed. - if (taskState.get() == DataFrameTransformTaskState.FAILED) { + if (taskState.get() == TransformTaskState.FAILED) { logger.warn("[{}] is already failed but encountered new failure; reason [{}].", getTransformId(), reason); listener.onResponse(null); return; } - // If the indexer is `STOPPING` this means that `DataFrameTransformTask#stop` was called previously, but something caused - // the indexer to fail. Since `ClientDataFrameIndexer#doSaveState` will persist the state to the index once the indexer stops, + // If the indexer is `STOPPING` this means that `TransformTask#stop` was called previously, but something caused + // the indexer to fail. Since `ClientTransformIndexer#doSaveState` will persist the state to the index once the indexer stops, // it is probably best to NOT change the internal state of the task and allow the normal stopping logic to continue. if (getIndexer() != null && getIndexer().getState() == IndexerState.STOPPING) { logger.info("[{}] attempt to fail transform with reason [{}] while it was stopping.", getTransformId(), reason); @@ -413,10 +400,10 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S // We should not keep retrying. Either the task will be stopped, or started // If it is started again, it is registered again. deregisterSchedulerJob(); - taskState.set(DataFrameTransformTaskState.FAILED); + taskState.set(TransformTaskState.FAILED); stateReason.set(reason); - DataFrameTransformState newState = getState(); - // Even though the indexer information is persisted to an index, we still need DataFrameTransformTaskState in the clusterstate + TransformState newState = getState(); + // Even though the indexer information is persisted to an index, we still need TransformTaskState in the clusterstate // This keeps track of STARTED, FAILED, STOPPED // This is because a FAILED state could occur because we failed to read the config from the internal index, which would imply that // we could not read the previous state information from said index. 
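A condensed sketch of the failure-handling guards described in this hunk, with stand-in enums and fields; the real method additionally removes the scheduler job and persists the new state to the cluster state via updatePersistentTaskState:

```java
import java.util.concurrent.atomic.AtomicReference;

public class MarkAsFailedSketch {
    enum TaskState { STARTED, STOPPED, FAILED }
    enum IndexerState { STARTED, INDEXING, STOPPING, STOPPED }

    private final AtomicReference<TaskState> taskState = new AtomicReference<>(TaskState.STARTED);
    private final AtomicReference<String> stateReason = new AtomicReference<>(null);
    private volatile IndexerState indexerState = IndexerState.STARTED;

    synchronized void markAsFailed(String reason) {
        // Already failed: a second trigger raced with the first failure, nothing more to do.
        if (taskState.get() == TaskState.FAILED) {
            return;
        }
        // Stopping: the stop path persists state once the indexer halts, so leave the task state alone.
        if (indexerState == IndexerState.STOPPING) {
            return;
        }
        // Otherwise stop retrying, remember why the task failed, and record the FAILED task state
        // (the real task also deregisters its scheduler job and writes the state to the cluster state).
        taskState.set(TaskState.FAILED);
        stateReason.set(reason);
    }
}
```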
@@ -440,7 +427,7 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S */ @Override public synchronized void onCancelled() { - logger.info("[{}] received cancellation request for data frame transform, state: [{}].", + logger.info("[{}] received cancellation request for transform, state: [{}].", getTransformId(), taskState.get()); if (getIndexer() != null && getIndexer().abort()) { @@ -449,7 +436,7 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S } } - DataFrameTransformTask setNumFailureRetries(int numFailureRetries) { + TransformTask setNumFailureRetries(int numFailureRetries) { this.numFailureRetries = numFailureRetries; return this; } @@ -470,7 +457,7 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S } private String schedulerJobName() { - return DataFrameTransformTask.SCHEDULE_NAME + "_" + getTransformId(); + return TransformTask.SCHEDULE_NAME + "_" + getTransformId(); } private SchedulerEngine.Schedule next() { @@ -480,7 +467,7 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S }; } - synchronized void initializeIndexer(ClientDataFrameIndexerBuilder indexerBuilder) { + synchronized void initializeIndexer(ClientTransformIndexerBuilder indexerBuilder) { indexer.set(indexerBuilder.build(this)); } @@ -501,7 +488,7 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S return threadPool; } - DataFrameTransformTaskState getTaskState() { + TransformTaskState getTaskState() { return taskState.get(); } diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationResultUtils.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/pivot/AggregationResultUtils.java similarity index 95% rename from x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationResultUtils.java rename to x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/pivot/AggregationResultUtils.java index 80c4b50e6e2..e7e48b2a710 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationResultUtils.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/pivot/AggregationResultUtils.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.dataframe.transforms.pivot; +package org.elasticsearch.xpack.transform.transforms.pivot; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.Numbers; @@ -21,10 +21,10 @@ import org.elasticsearch.search.aggregations.metrics.GeoBounds; import org.elasticsearch.search.aggregations.metrics.GeoCentroid; import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregation.SingleValue; import org.elasticsearch.search.aggregations.metrics.ScriptedMetric; -import org.elasticsearch.xpack.core.dataframe.DataFrameField; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerTransformStats; -import org.elasticsearch.xpack.core.dataframe.transforms.pivot.GroupConfig; -import org.elasticsearch.xpack.dataframe.transforms.IDGenerator; +import org.elasticsearch.xpack.core.transform.TransformField; +import org.elasticsearch.xpack.core.transform.transforms.TransformIndexerStats; +import org.elasticsearch.xpack.core.transform.transforms.pivot.GroupConfig; +import org.elasticsearch.xpack.transform.transforms.IDGenerator; import java.util.Arrays; import java.util.Collection; @@ -35,7 +35,7 @@ import java.util.Map; import java.util.stream.Collectors; import java.util.stream.Stream; -import static org.elasticsearch.xpack.dataframe.transforms.pivot.SchemaUtil.isNumericType; +import static org.elasticsearch.xpack.transform.transforms.pivot.SchemaUtil.isNumericType; public final class AggregationResultUtils { @@ -64,7 +64,7 @@ public final class AggregationResultUtils { Collection aggregationBuilders, Collection pipelineAggs, Map fieldTypeMap, - DataFrameIndexerTransformStats stats) { + TransformIndexerStats stats) { return agg.getBuckets().stream().map(bucket -> { stats.incrementNumDocuments(bucket.getDocCount()); Map document = new HashMap<>(); @@ -94,7 +94,7 @@ public final class AggregationResultUtils { } } - document.put(DataFrameField.DOCUMENT_ID_FIELD, idGen.getID()); + document.put(TransformField.DOCUMENT_ID_FIELD, idGen.getID()); return document; }); diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/Aggregations.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/pivot/Aggregations.java similarity index 93% rename from x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/Aggregations.java rename to x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/pivot/Aggregations.java index 7006ac7e0da..effa3eb1e0e 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/Aggregations.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/pivot/Aggregations.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.dataframe.transforms.pivot; +package org.elasticsearch.xpack.transform.transforms.pivot; import java.util.Locale; import java.util.Set; @@ -20,7 +20,7 @@ public final class Aggregations { private Aggregations() {} /** - * Supported aggregation by dataframe and corresponding meta information. + * Supported aggregation by transform and corresponding meta information. 
* * aggregationType - the name of the aggregation as returned by * {@link org.elasticsearch.search.aggregations.BaseAggregationBuilder#getType()}} @@ -62,7 +62,7 @@ public final class Aggregations { private static Set aggregationSupported = Stream.of(AggregationType.values()).map(AggregationType::name) .collect(Collectors.toSet()); - public static boolean isSupportedByDataframe(String aggregationType) { + public static boolean isSupportedByTransform(String aggregationType) { return aggregationSupported.contains(aggregationType.toUpperCase(Locale.ROOT)); } diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/Pivot.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/pivot/Pivot.java similarity index 92% rename from x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/Pivot.java rename to x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/pivot/Pivot.java index 23b6d5b5621..33611cbb3a5 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/Pivot.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/pivot/Pivot.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.dataframe.transforms.pivot; +package org.elasticsearch.xpack.transform.transforms.pivot; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -27,12 +27,12 @@ import org.elasticsearch.search.aggregations.PipelineAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.composite.CompositeAggregation; import org.elasticsearch.search.aggregations.bucket.composite.CompositeAggregationBuilder; import org.elasticsearch.search.builder.SearchSourceBuilder; -import org.elasticsearch.xpack.core.dataframe.DataFrameMessages; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerTransformStats; -import org.elasticsearch.xpack.core.dataframe.transforms.SourceConfig; -import org.elasticsearch.xpack.core.dataframe.transforms.pivot.GroupConfig; -import org.elasticsearch.xpack.core.dataframe.transforms.pivot.PivotConfig; -import org.elasticsearch.xpack.core.dataframe.transforms.pivot.SingleGroupSource; +import org.elasticsearch.xpack.core.transform.TransformMessages; +import org.elasticsearch.xpack.core.transform.transforms.TransformIndexerStats; +import org.elasticsearch.xpack.core.transform.transforms.SourceConfig; +import org.elasticsearch.xpack.core.transform.transforms.pivot.GroupConfig; +import org.elasticsearch.xpack.core.transform.transforms.pivot.PivotConfig; +import org.elasticsearch.xpack.core.transform.transforms.pivot.SingleGroupSource; import java.io.IOException; import java.util.Collection; @@ -49,7 +49,7 @@ public class Pivot { public static final int DEFAULT_INITIAL_PAGE_SIZE = 500; public static final int TEST_QUERY_PAGE_SIZE = 50; - private static final String COMPOSITE_AGGREGATION_NAME = "_data_frame"; + private static final String COMPOSITE_AGGREGATION_NAME = "_transform"; private static final Logger logger = LogManager.getLogger(Pivot.class); private final PivotConfig config; @@ -72,7 +72,7 @@ public class Pivot { public void validateConfig() { for (AggregationBuilder agg : config.getAggregationConfig().getAggregatorFactories()) { - if (Aggregations.isSupportedByDataframe(agg.getType()) == false) { + if 
(Aggregations.isSupportedByTransform(agg.getType()) == false) { throw new ElasticsearchStatusException("Unsupported aggregation type [" + agg.getType() + "]", RestStatus.BAD_REQUEST); } } @@ -164,7 +164,7 @@ public class Pivot { public Stream> extractResults(CompositeAggregation agg, Map fieldTypeMap, - DataFrameIndexerTransformStats dataFrameIndexerTransformStats) { + TransformIndexerStats transformIndexerStats) { GroupConfig groups = config.getGroupConfig(); Collection aggregationBuilders = config.getAggregationConfig().getAggregatorFactories(); @@ -175,7 +175,7 @@ public class Pivot { aggregationBuilders, pipelineAggregationBuilders, fieldTypeMap, - dataFrameIndexerTransformStats); + transformIndexerStats); } public QueryBuilder filterBuckets(Map> changedBuckets) { @@ -239,7 +239,7 @@ public class Pivot { LoggingDeprecationHandler.INSTANCE, BytesReference.bytes(builder).streamInput()); compositeAggregation = CompositeAggregationBuilder.parse(COMPOSITE_AGGREGATION_NAME, parser); } catch (IOException e) { - throw new RuntimeException(DataFrameMessages.DATA_FRAME_TRANSFORM_PIVOT_FAILED_TO_CREATE_COMPOSITE_AGGREGATION, e); + throw new RuntimeException(TransformMessages.TRANSFORM_PIVOT_FAILED_TO_CREATE_COMPOSITE_AGGREGATION, e); } return compositeAggregation; } diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/SchemaUtil.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/pivot/SchemaUtil.java similarity index 73% rename from x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/SchemaUtil.java rename to x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/pivot/SchemaUtil.java index 2aad83f5a25..e409f923ede 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/SchemaUtil.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/pivot/SchemaUtil.java @@ -4,14 +4,14 @@ * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.dataframe.transforms.pivot; +package org.elasticsearch.xpack.transform.transforms.pivot; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsAction; -import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsRequest; -import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsResponse.FieldMappingMetaData; +import org.elasticsearch.action.fieldcaps.FieldCapabilitiesAction; +import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest; +import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.Client; import org.elasticsearch.index.mapper.NumberFieldMapper; @@ -21,7 +21,7 @@ import org.elasticsearch.search.aggregations.metrics.ScriptedMetricAggregationBu import org.elasticsearch.search.aggregations.support.MultiValuesSourceAggregationBuilder; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder; import org.elasticsearch.xpack.core.ClientHelper; -import org.elasticsearch.xpack.core.dataframe.transforms.pivot.PivotConfig; +import org.elasticsearch.xpack.core.transform.transforms.pivot.PivotConfig; import java.util.HashMap; import java.util.Map; @@ -99,12 +99,12 @@ public final class SchemaUtil { allFieldNames.putAll(fieldNamesForGrouping); getSourceFieldMappings(client, source, allFieldNames.values().toArray(new String[0]), - ActionListener.wrap( - sourceMappings -> listener.onResponse(resolveMappings(aggregationSourceFieldNames, - aggregationTypes, - fieldNamesForGrouping, - sourceMappings)), - listener::onFailure)); + ActionListener.wrap( + sourceMappings -> listener.onResponse(resolveMappings(aggregationSourceFieldNames, + aggregationTypes, + fieldNamesForGrouping, + sourceMappings)), + listener::onFailure)); } /** @@ -118,15 +118,16 @@ public final class SchemaUtil { public static void getDestinationFieldMappings(final Client client, final String index, final ActionListener> listener) { - GetFieldMappingsRequest fieldMappingRequest = new GetFieldMappingsRequest(); - fieldMappingRequest.indices(index); - fieldMappingRequest.fields("*"); + FieldCapabilitiesRequest fieldCapabilitiesRequest = new FieldCapabilitiesRequest() + .indices(index) + .fields("*") + .indicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN); ClientHelper.executeAsyncWithOrigin(client, - ClientHelper.DATA_FRAME_ORIGIN, - GetFieldMappingsAction.INSTANCE, - fieldMappingRequest, + ClientHelper.TRANSFORM_ORIGIN, + FieldCapabilitiesAction.INSTANCE, + fieldCapabilitiesRequest, ActionListener.wrap( - r -> listener.onResponse(extractFieldMappings(r.mappings())), + r -> listener.onResponse(extractFieldMappings(r)), listener::onFailure )); } @@ -143,7 +144,7 @@ public final class SchemaUtil { String destinationMapping = Aggregations.resolveTargetMapping(aggregationName, sourceMapping); logger.debug("Deduced mapping for: [{}], agg type [{}] to [{}]", - targetFieldName, aggregationName, destinationMapping); + targetFieldName, aggregationName, destinationMapping); if (Aggregations.isDynamicMapping(destinationMapping)) { logger.debug("Dynamic target mapping set for field [{}] and aggregation [{}]", targetFieldName, aggregationName); @@ -171,42 +172,25 @@ public final class SchemaUtil { * Very "magic" helper method to extract the source mappings */ private static void 
getSourceFieldMappings(Client client, String[] index, String[] fields, - ActionListener> listener) { - GetFieldMappingsRequest fieldMappingRequest = new GetFieldMappingsRequest(); - fieldMappingRequest.indices(index); - fieldMappingRequest.fields(fields); - fieldMappingRequest.indicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN); - - client.execute(GetFieldMappingsAction.INSTANCE, fieldMappingRequest, ActionListener.wrap( - response -> listener.onResponse(extractFieldMappings(response.mappings())), + ActionListener> listener) { + FieldCapabilitiesRequest fieldCapabilitiesRequest = new FieldCapabilitiesRequest() + .indices(index) + .fields(fields) + .indicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN); + client.execute(FieldCapabilitiesAction.INSTANCE, fieldCapabilitiesRequest, ActionListener.wrap( + response -> listener.onResponse(extractFieldMappings(response)), listener::onFailure)); } - private static Map extractFieldMappings(Map>> mappings) { + private static Map extractFieldMappings(FieldCapabilitiesResponse response) { Map extractedTypes = new HashMap<>(); - mappings.forEach((indexName, docTypeToMapping) -> { - // "_doc" -> - docTypeToMapping.forEach((docType, fieldNameToMapping) -> { - // "my_field" -> - fieldNameToMapping.forEach((fieldName, fieldMapping) -> { - // "mapping" -> "my_field" -> - fieldMapping.sourceAsMap().forEach((name, typeMap) -> { - // expected object: { "type": type } - if (typeMap instanceof Map) { - final Map map = (Map) typeMap; - if (map.containsKey("type")) { - String type = map.get("type").toString(); - if (logger.isTraceEnabled()) { - logger.trace("Extracted type for [" + fieldName + "] : [" + type + "] from index [" + indexName + "]"); - } - // TODO: overwrites types, requires resolve if - // types are mixed - extractedTypes.put(fieldName, type); - } - } - }); - }); + response.get().forEach((fieldName, capabilitiesMap) -> { + // TODO: overwrites types, requires resolve if + // types are mixed + capabilitiesMap.forEach((name, capability) -> { + logger.trace("Extracted type for [{}] : [{}]", fieldName, capability.getType()); + extractedTypes.put(fieldName, capability.getType()); }); }); return extractedTypes; diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/LocalStateDataFrame.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/LocalStateTransform.java similarity index 73% rename from x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/LocalStateDataFrame.java rename to x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/LocalStateTransform.java index f4b3221ec9d..0d2ae10de89 100644 --- a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/LocalStateDataFrame.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/LocalStateTransform.java @@ -3,21 +3,22 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
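(Aside on the SchemaUtil hunk above, not part of the patch itself: switching from the get-field-mappings API to the field capabilities API means the response already arrives as a flat map from field name to per-type capabilities, so the nested index/doc-type/mapping-source traversal disappears. A minimal sketch of that extraction step, with hypothetical class and method names, might look like the following; as in the patch, a field mapped to several types across indices simply keeps the last type seen.)

    import java.util.HashMap;
    import java.util.Map;
    import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse;

    final class FieldTypeExtractionSketch {
        /** Collapse a field capabilities response into a field-name -> type map. */
        static Map<String, String> extractFieldTypes(FieldCapabilitiesResponse response) {
            Map<String, String> fieldTypes = new HashMap<>();
            // response.get() is keyed by field name; the inner map is keyed by mapped type.
            response.get().forEach((fieldName, capabilitiesPerType) ->
                capabilitiesPerType.forEach((type, capability) ->
                    // Mixed types across indices overwrite each other, matching the TODO in the patch.
                    fieldTypes.put(fieldName, capability.getType())));
            return fieldTypes;
        }
    }
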
*/ -package org.elasticsearch.xpack.dataframe; +package org.elasticsearch.xpack.transform; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; + import java.nio.file.Path; -public class LocalStateDataFrame extends LocalStateCompositeXPackPlugin { +public class LocalStateTransform extends LocalStateCompositeXPackPlugin { - public LocalStateDataFrame(final Settings settings, final Path configPath) throws Exception { + public LocalStateTransform(final Settings settings, final Path configPath) throws Exception { super(settings, configPath); @SuppressWarnings("resource") - LocalStateDataFrame thisVar = this; + LocalStateTransform thisVar = this; - plugins.add(new DataFrame(settings) { + plugins.add(new Transform(settings) { @Override protected XPackLicenseState getLicenseState() { return thisVar.getLicenseState(); diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/DataFrameFeatureSetTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/TransformFeatureSetTests.java similarity index 68% rename from x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/DataFrameFeatureSetTests.java rename to x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/TransformFeatureSetTests.java index 35c3a19ab6b..ac78fb016f2 100644 --- a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/DataFrameFeatureSetTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/TransformFeatureSetTests.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.dataframe; +package org.elasticsearch.xpack.transform; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.PlainActionFuture; @@ -23,7 +23,7 @@ import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregation; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.XPackFeatureSet; import org.elasticsearch.xpack.core.XPackFeatureSet.Usage; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerTransformStats; +import org.elasticsearch.xpack.core.transform.transforms.TransformIndexerStats; import org.junit.Before; import java.io.IOException; @@ -33,13 +33,13 @@ import java.util.List; import java.util.Map; import java.util.concurrent.ExecutionException; -import static org.elasticsearch.xpack.dataframe.DataFrameFeatureSet.PROVIDED_STATS; +import static org.elasticsearch.xpack.transform.TransformFeatureSet.PROVIDED_STATS; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.core.Is.is; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; -public class DataFrameFeatureSetTests extends ESTestCase { +public class TransformFeatureSetTests extends ESTestCase { private XPackLicenseState licenseState; @Before @@ -48,28 +48,56 @@ public class DataFrameFeatureSetTests extends ESTestCase { } public void testAvailable() { - DataFrameFeatureSet featureSet = new DataFrameFeatureSet(Settings.EMPTY, + TransformFeatureSet featureSet = new TransformFeatureSet(Settings.EMPTY, mock(ClusterService.class), mock(Client.class), licenseState); boolean available = randomBoolean(); - when(licenseState.isDataFrameAllowed()).thenReturn(available); + when(licenseState.isTransformAllowed()).thenReturn(available); 
assertThat(featureSet.available(), is(available)); } public void testEnabledSetting() { boolean enabled = randomBoolean(); Settings.Builder settings = Settings.builder(); - settings.put("xpack.data_frame.enabled", enabled); - DataFrameFeatureSet featureSet = new DataFrameFeatureSet(settings.build(), + settings.put("xpack.transform.enabled", enabled); + TransformFeatureSet featureSet = new TransformFeatureSet(settings.build(), mock(ClusterService.class), mock(Client.class), licenseState); assertThat(featureSet.enabled(), is(enabled)); } + public void testEnabledSettingFallback() { + boolean enabled = randomBoolean(); + Settings.Builder settings = Settings.builder(); + // use the deprecated setting + settings.put("xpack.data_frame.enabled", enabled); + TransformFeatureSet featureSet = new TransformFeatureSet(settings.build(), + mock(ClusterService.class), + mock(Client.class), + licenseState); + assertThat(featureSet.enabled(), is(enabled)); + assertWarnings("[xpack.data_frame.enabled] setting was deprecated in Elasticsearch and will be removed in a future release! " + + "See the breaking changes documentation for the next major version."); + } + + public void testEnabledSettingFallbackMix() { + Settings.Builder settings = Settings.builder(); + // use the deprecated setting + settings.put("xpack.data_frame.enabled", false); + settings.put("xpack.transform.enabled", true); + TransformFeatureSet featureSet = new TransformFeatureSet(settings.build(), + mock(ClusterService.class), + mock(Client.class), + licenseState); + assertThat(featureSet.enabled(), is(true)); + assertWarnings("[xpack.data_frame.enabled] setting was deprecated in Elasticsearch and will be removed in a future release! " + + "See the breaking changes documentation for the next major version."); + } + public void testEnabledDefault() { - DataFrameFeatureSet featureSet = new DataFrameFeatureSet(Settings.EMPTY, + TransformFeatureSet featureSet = new TransformFeatureSet(Settings.EMPTY, mock(ClusterService.class), mock(Client.class), licenseState); @@ -81,9 +109,9 @@ public class DataFrameFeatureSetTests extends ESTestCase { SearchResponse withEmptyAggs = mock(SearchResponse.class); when(withEmptyAggs.getAggregations()).thenReturn(emptyAggs); - assertThat(DataFrameFeatureSet.parseSearchAggs(withEmptyAggs), equalTo(new DataFrameIndexerTransformStats())); + assertThat(TransformFeatureSet.parseSearchAggs(withEmptyAggs), equalTo(new TransformIndexerStats())); - DataFrameIndexerTransformStats expectedStats = new DataFrameIndexerTransformStats( + TransformIndexerStats expectedStats = new TransformIndexerStats( 1, // numPages 2, // numInputDocuments 3, // numOutputDocuments @@ -98,13 +126,13 @@ public class DataFrameFeatureSetTests extends ESTestCase { int currentStat = 1; List aggs = new ArrayList<>(PROVIDED_STATS.length); for (String statName : PROVIDED_STATS) { - aggs.add(buildAgg(statName, (double) currentStat++)); + aggs.add(buildAgg(statName, currentStat++)); } Aggregations aggregations = new Aggregations(aggs); SearchResponse withAggs = mock(SearchResponse.class); when(withAggs.getAggregations()).thenReturn(aggregations); - assertThat(DataFrameFeatureSet.parseSearchAggs(withAggs), equalTo(expectedStats)); + assertThat(TransformFeatureSet.parseSearchAggs(withAggs), equalTo(expectedStats)); } private static Aggregation buildAgg(String name, double value) { @@ -115,10 +143,10 @@ public class DataFrameFeatureSetTests extends ESTestCase { } public void testUsageDisabled() throws IOException, InterruptedException, ExecutionException { - 
when(licenseState.isDataFrameAllowed()).thenReturn(true); + when(licenseState.isTransformAllowed()).thenReturn(true); Settings.Builder settings = Settings.builder(); - settings.put("xpack.data_frame.enabled", false); - DataFrameFeatureSet featureSet = new DataFrameFeatureSet(settings.build(), + settings.put("xpack.transform.enabled", false); + TransformFeatureSet featureSet = new TransformFeatureSet(settings.build(), mock(ClusterService.class), mock(Client.class), licenseState); diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/DataFrameSingleNodeTestCase.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/TransformSingleNodeTestCase.java similarity index 86% rename from x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/DataFrameSingleNodeTestCase.java rename to x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/TransformSingleNodeTestCase.java index 38820d315f7..5cdbba8e122 100644 --- a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/DataFrameSingleNodeTestCase.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/TransformSingleNodeTestCase.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.dataframe; +package org.elasticsearch.xpack.transform; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.LatchedActionListener; @@ -15,7 +15,7 @@ import org.elasticsearch.index.reindex.ReindexPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.xpack.core.template.TemplateUtils; -import org.elasticsearch.xpack.dataframe.persistence.DataFrameInternalIndex; +import org.elasticsearch.xpack.transform.persistence.TransformInternalIndex; import org.junit.Before; import java.util.Collection; @@ -25,14 +25,14 @@ import java.util.function.Consumer; import static org.hamcrest.Matchers.equalTo; -public abstract class DataFrameSingleNodeTestCase extends ESSingleNodeTestCase { +public abstract class TransformSingleNodeTestCase extends ESSingleNodeTestCase { @Before public void waitForTemplates() throws Exception { assertBusy(() -> { ClusterState state = client().admin().cluster().prepareState().get().getState(); - assertTrue("Timed out waiting for the data frame templates to be installed", TemplateUtils - .checkTemplateExistsAndVersionIsGTECurrentVersion(DataFrameInternalIndex.LATEST_INDEX_VERSIONED_NAME, state)); + assertTrue("Timed out waiting for the transform templates to be installed", TemplateUtils + .checkTemplateExistsAndVersionIsGTECurrentVersion(TransformInternalIndex.LATEST_INDEX_VERSIONED_NAME, state)); }); } @@ -46,7 +46,7 @@ public abstract class DataFrameSingleNodeTestCase extends ESSingleNodeTestCase { @Override protected Collection> getPlugins() { - return pluginList(LocalStateDataFrame.class, ReindexPlugin.class); + return pluginList(LocalStateTransform.class, ReindexPlugin.class); } protected void assertAsync(Consumer> function, T expected, CheckedConsumer onAnswer, diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/action/DataFrameNodesTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/action/TransformNodesTests.java similarity index 72% rename from x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/action/DataFrameNodesTests.java rename to 
x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/action/TransformNodesTests.java index d13c33fe9aa..2ce3d8f882a 100644 --- a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/action/DataFrameNodesTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/action/TransformNodesTests.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.dataframe.action; +package org.elasticsearch.xpack.transform.action; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterName; @@ -15,26 +15,26 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.persistent.PersistentTaskParams; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xpack.core.dataframe.DataFrameField; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransform; +import org.elasticsearch.xpack.core.transform.TransformField; +import org.elasticsearch.xpack.core.transform.transforms.TransformTaskParams; import java.util.Arrays; import java.util.Collections; import static org.hamcrest.Matchers.hasItemInArray; -public class DataFrameNodesTests extends ESTestCase { +public class TransformNodesTests extends ESTestCase { - public void testDataframeNodes() { - String dataFrameIdFoo = "df-id-foo"; - String dataFrameIdBar = "df-id-bar"; + public void testTransformNodes() { + String transformIdFoo = "df-id-foo"; + String transformIdBar = "df-id-bar"; PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); - tasksBuilder.addTask(dataFrameIdFoo, - DataFrameField.TASK_NAME, new DataFrameTransform(dataFrameIdFoo, Version.CURRENT, null), + tasksBuilder.addTask(transformIdFoo, + TransformField.TASK_NAME, new TransformTaskParams(transformIdFoo, Version.CURRENT, null), new PersistentTasksCustomMetaData.Assignment("node-1", "test assignment")); - tasksBuilder.addTask(dataFrameIdBar, - DataFrameField.TASK_NAME, new DataFrameTransform(dataFrameIdBar, Version.CURRENT, null), + tasksBuilder.addTask(transformIdBar, + TransformField.TASK_NAME, new TransformTaskParams(transformIdBar, Version.CURRENT, null), new PersistentTasksCustomMetaData.Assignment("node-2", "test assignment")); tasksBuilder.addTask("test-task1", "testTasks", new PersistentTaskParams() { @Override @@ -63,15 +63,15 @@ public class DataFrameNodesTests extends ESTestCase { .metaData(MetaData.builder().putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build())) .build(); - String[] nodes = DataFrameNodes.dataFrameTaskNodes(Arrays.asList(dataFrameIdFoo, dataFrameIdBar), cs); + String[] nodes = TransformNodes.transformTaskNodes(Arrays.asList(transformIdFoo, transformIdBar), cs); assertEquals(2, nodes.length); assertThat(nodes, hasItemInArray("node-1")); assertThat(nodes, hasItemInArray("node-2")); } - public void testDataframeNodes_NoTasks() { + public void testTransformNodes_NoTasks() { ClusterState emptyState = ClusterState.builder(new ClusterName("_name")).build(); - String[] nodes = DataFrameNodes.dataFrameTaskNodes(Collections.singletonList("df-id"), emptyState); + String[] nodes = TransformNodes.transformTaskNodes(Collections.singletonList("df-id"), emptyState); assertEquals(0, nodes.length); } } diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/action/TransportStopDataFrameTransformActionTests.java 
b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/action/TransportStopTransformActionTests.java similarity index 61% rename from x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/action/TransportStopDataFrameTransformActionTests.java rename to x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/action/TransportStopTransformActionTests.java index 61fad63c832..9fcc44d7389 100644 --- a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/action/TransportStopDataFrameTransformActionTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/action/TransportStopTransformActionTests.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.dataframe.action; +package org.elasticsearch.xpack.transform.action; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.Version; @@ -12,11 +12,11 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xpack.core.dataframe.DataFrameMessages; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransform; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformState; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformTaskState; import org.elasticsearch.xpack.core.indexing.IndexerState; +import org.elasticsearch.xpack.core.transform.TransformMessages; +import org.elasticsearch.xpack.core.transform.transforms.TransformTaskParams; +import org.elasticsearch.xpack.core.transform.transforms.TransformState; +import org.elasticsearch.xpack.core.transform.transforms.TransformTaskState; import java.util.Arrays; import java.util.Collections; @@ -24,7 +24,7 @@ import java.util.Collections; import static org.elasticsearch.rest.RestStatus.CONFLICT; import static org.hamcrest.Matchers.equalTo; -public class TransportStopDataFrameTransformActionTests extends ESTestCase { +public class TransportStopTransformActionTests extends ESTestCase { private MetaData.Builder buildMetadata(PersistentTasksCustomMetaData ptasks) { return MetaData.builder().putCustom(PersistentTasksCustomMetaData.TYPE, ptasks); @@ -33,26 +33,26 @@ public class TransportStopDataFrameTransformActionTests extends ESTestCase { public void testTaskStateValidationWithNoTasks() { MetaData.Builder metaData = MetaData.builder(); ClusterState.Builder csBuilder = ClusterState.builder(new ClusterName("_name")).metaData(metaData); - TransportStopDataFrameTransformAction.validateTaskState(csBuilder.build(), Collections.singletonList("non-failed-task"), false); + TransportStopTransformAction.validateTaskState(csBuilder.build(), Collections.singletonList("non-failed-task"), false); PersistentTasksCustomMetaData.Builder pTasksBuilder = PersistentTasksCustomMetaData.builder(); csBuilder = ClusterState.builder(new ClusterName("_name")).metaData(buildMetadata(pTasksBuilder.build())); - TransportStopDataFrameTransformAction.validateTaskState(csBuilder.build(), Collections.singletonList("non-failed-task"), false); + TransportStopTransformAction.validateTaskState(csBuilder.build(), Collections.singletonList("non-failed-task"), false); } - public void testTaskStateValidationWithDataFrameTasks() { + public 
void testTaskStateValidationWithTransformTasks() { // Test with the task state being null PersistentTasksCustomMetaData.Builder pTasksBuilder = PersistentTasksCustomMetaData.builder() .addTask("non-failed-task", - DataFrameTransform.NAME, - new DataFrameTransform("data-frame-task-1", Version.CURRENT, null), + TransformTaskParams.NAME, + new TransformTaskParams("transform-task-1", Version.CURRENT, null), new PersistentTasksCustomMetaData.Assignment("current-data-node-with-1-tasks", "")); ClusterState.Builder csBuilder = ClusterState.builder(new ClusterName("_name")).metaData(buildMetadata(pTasksBuilder.build())); - TransportStopDataFrameTransformAction.validateTaskState(csBuilder.build(), Collections.singletonList("non-failed-task"), false); + TransportStopTransformAction.validateTaskState(csBuilder.build(), Collections.singletonList("non-failed-task"), false); // test again with a non failed task but this time it has internal state - pTasksBuilder.updateTaskState("non-failed-task", new DataFrameTransformState(DataFrameTransformTaskState.STOPPED, + pTasksBuilder.updateTaskState("non-failed-task", new TransformState(TransformTaskState.STOPPED, IndexerState.STOPPED, null, 0L, @@ -60,13 +60,13 @@ public class TransportStopDataFrameTransformActionTests extends ESTestCase { null)); csBuilder = ClusterState.builder(new ClusterName("_name")).metaData(buildMetadata(pTasksBuilder.build())); - TransportStopDataFrameTransformAction.validateTaskState(csBuilder.build(), Collections.singletonList("non-failed-task"), false); + TransportStopTransformAction.validateTaskState(csBuilder.build(), Collections.singletonList("non-failed-task"), false); pTasksBuilder.addTask("failed-task", - DataFrameTransform.NAME, - new DataFrameTransform("data-frame-task-1", Version.CURRENT, null), + TransformTaskParams.NAME, + new TransformTaskParams("transform-task-1", Version.CURRENT, null), new PersistentTasksCustomMetaData.Assignment("current-data-node-with-1-tasks", "")) - .updateTaskState("failed-task", new DataFrameTransformState(DataFrameTransformTaskState.FAILED, + .updateTaskState("failed-task", new TransformState(TransformTaskState.FAILED, IndexerState.STOPPED, null, 0L, @@ -74,19 +74,19 @@ public class TransportStopDataFrameTransformActionTests extends ESTestCase { null)); csBuilder = ClusterState.builder(new ClusterName("_name")).metaData(buildMetadata(pTasksBuilder.build())); - TransportStopDataFrameTransformAction.validateTaskState(csBuilder.build(), Arrays.asList("non-failed-task", "failed-task"), true); + TransportStopTransformAction.validateTaskState(csBuilder.build(), Arrays.asList("non-failed-task", "failed-task"), true); - TransportStopDataFrameTransformAction.validateTaskState(csBuilder.build(), Collections.singletonList("non-failed-task"), false); + TransportStopTransformAction.validateTaskState(csBuilder.build(), Collections.singletonList("non-failed-task"), false); ClusterState.Builder csBuilderFinal = ClusterState.builder(new ClusterName("_name")).metaData(buildMetadata(pTasksBuilder.build())); ElasticsearchStatusException ex = expectThrows(ElasticsearchStatusException.class, - () -> TransportStopDataFrameTransformAction.validateTaskState(csBuilderFinal.build(), + () -> TransportStopTransformAction.validateTaskState(csBuilderFinal.build(), Collections.singletonList("failed-task"), false)); assertThat(ex.status(), equalTo(CONFLICT)); assertThat(ex.getMessage(), - equalTo(DataFrameMessages.getMessage(DataFrameMessages.DATA_FRAME_CANNOT_STOP_FAILED_TRANSFORM, + 
equalTo(TransformMessages.getMessage(TransformMessages.CANNOT_STOP_FAILED_TRANSFORM, "failed-task", "task has failed"))); } diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/checkpoint/DefaultCheckpointProviderTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/checkpoint/DefaultCheckpointProviderTests.java similarity index 78% rename from x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/checkpoint/DefaultCheckpointProviderTests.java rename to x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/checkpoint/DefaultCheckpointProviderTests.java index 0912724e702..9a81dcd812a 100644 --- a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/checkpoint/DefaultCheckpointProviderTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/checkpoint/DefaultCheckpointProviderTests.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.dataframe.checkpoint; +package org.elasticsearch.xpack.transform.checkpoint; import org.apache.logging.log4j.Level; import org.apache.logging.log4j.LogManager; @@ -15,11 +15,11 @@ import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.MockLogAppender; import org.elasticsearch.test.MockLogAppender.LoggingExpectation; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfigTests; -import org.elasticsearch.xpack.dataframe.notifications.MockDataFrameAuditor; -import org.elasticsearch.xpack.dataframe.notifications.MockDataFrameAuditor.AuditExpectation; -import org.elasticsearch.xpack.dataframe.persistence.DataFrameTransformsConfigManager; +import org.elasticsearch.xpack.core.transform.transforms.TransformConfig; +import org.elasticsearch.xpack.core.transform.transforms.TransformConfigTests; +import org.elasticsearch.xpack.transform.notifications.MockTransformAuditor; +import org.elasticsearch.xpack.transform.notifications.MockTransformAuditor.AuditExpectation; +import org.elasticsearch.xpack.transform.persistence.TransformConfigManager; import org.junit.Before; import java.util.Collections; @@ -31,25 +31,25 @@ public class DefaultCheckpointProviderTests extends ESTestCase { private Client client; - private MockDataFrameAuditor dataFrameAuditor; - private DataFrameTransformsConfigManager dataFrameTransformsConfigManager; + private MockTransformAuditor transformAuditor; + private TransformConfigManager transformConfigManager; private Logger checkpointProviderlogger = LogManager.getLogger(DefaultCheckpointProvider.class); @Before public void setUpMocks() throws IllegalAccessException { client = mock(Client.class); - dataFrameTransformsConfigManager = mock(DataFrameTransformsConfigManager.class); - dataFrameAuditor = new MockDataFrameAuditor(); + transformConfigManager = mock(TransformConfigManager.class); + transformAuditor = new MockTransformAuditor(); } public void testReportSourceIndexChangesRunsEmpty() throws Exception { String transformId = getTestName(); - DataFrameTransformConfig transformConfig = DataFrameTransformConfigTests.randomDataFrameTransformConfig(transformId); + TransformConfig transformConfig = TransformConfigTests.randomTransformConfig(transformId); DefaultCheckpointProvider provider = new DefaultCheckpointProvider( client, - 
dataFrameTransformsConfigManager, - dataFrameAuditor, + transformConfigManager, + transformAuditor, transformConfig); assertExpectation( @@ -57,7 +57,7 @@ public class DefaultCheckpointProviderTests extends ESTestCase { checkpointProviderlogger.getName(), Level.WARN, "Source did not resolve to any open indexes for transform [" + transformId + "]"), - new MockDataFrameAuditor.SeenAuditExpectation("warn when source is empty", + new MockTransformAuditor.SeenAuditExpectation("warn when source is empty", org.elasticsearch.xpack.core.common.notifications.Level.WARNING, transformId, "Source did not resolve to any open indexes"), @@ -70,7 +70,7 @@ public class DefaultCheckpointProviderTests extends ESTestCase { checkpointProviderlogger.getName(), Level.WARN, "Source did not resolve to any concrete indexes"), - new MockDataFrameAuditor.UnseenAuditExpectation("do not warn if empty again", + new MockTransformAuditor.UnseenAuditExpectation("do not warn if empty again", org.elasticsearch.xpack.core.common.notifications.Level.WARNING, transformId, "Source did not resolve to any concrete indexes"), @@ -81,12 +81,12 @@ public class DefaultCheckpointProviderTests extends ESTestCase { public void testReportSourceIndexChangesAddDelete() throws Exception { String transformId = getTestName(); - DataFrameTransformConfig transformConfig = DataFrameTransformConfigTests.randomDataFrameTransformConfig(transformId); + TransformConfig transformConfig = TransformConfigTests.randomTransformConfig(transformId); DefaultCheckpointProvider provider = new DefaultCheckpointProvider( client, - dataFrameTransformsConfigManager, - dataFrameAuditor, + transformConfigManager, + transformAuditor, transformConfig); assertExpectation( @@ -95,7 +95,7 @@ public class DefaultCheckpointProviderTests extends ESTestCase { Level.DEBUG, "Source index resolve found changes, removedIndexes: [index], new indexes: [other_index] for transform [" + transformId + "]"), - new MockDataFrameAuditor.SeenAuditExpectation("info about adds/removal", + new MockTransformAuditor.SeenAuditExpectation("info about adds/removal", org.elasticsearch.xpack.core.common.notifications.Level.INFO, transformId, "Source index resolve found changes, removedIndexes: [index], new indexes: [other_index]"), @@ -109,7 +109,7 @@ public class DefaultCheckpointProviderTests extends ESTestCase { Level.DEBUG, "Source index resolve found changes, removedIndexes: [index], new indexes: [] for transform [" + transformId + "]"), - new MockDataFrameAuditor.SeenAuditExpectation("info about adds/removal", + new MockTransformAuditor.SeenAuditExpectation("info about adds/removal", org.elasticsearch.xpack.core.common.notifications.Level.INFO, transformId, "Source index resolve found changes, removedIndexes: [index], new indexes: []"), @@ -122,7 +122,7 @@ public class DefaultCheckpointProviderTests extends ESTestCase { Level.DEBUG, "Source index resolve found changes, removedIndexes: [], new indexes: [other_index] for transform [" + transformId + "]"), - new MockDataFrameAuditor.SeenAuditExpectation("info about adds/removal", + new MockTransformAuditor.SeenAuditExpectation("info about adds/removal", org.elasticsearch.xpack.core.common.notifications.Level.INFO, transformId, "Source index resolve found changes, removedIndexes: [], new indexes: [other_index]"), @@ -133,12 +133,12 @@ public class DefaultCheckpointProviderTests extends ESTestCase { public void testReportSourceIndexChangesAddDeleteMany() throws Exception { String transformId = getTestName(); - DataFrameTransformConfig 
transformConfig = DataFrameTransformConfigTests.randomDataFrameTransformConfig(transformId); + TransformConfig transformConfig = TransformConfigTests.randomTransformConfig(transformId); DefaultCheckpointProvider provider = new DefaultCheckpointProvider( client, - dataFrameTransformsConfigManager, - dataFrameAuditor, + transformConfigManager, + transformAuditor, transformConfig); HashSet oldSet = new HashSet<>(); @@ -156,7 +156,7 @@ public class DefaultCheckpointProviderTests extends ESTestCase { Level.DEBUG, "Source index resolve found more than 10 changes, [50] removed indexes, [50] new indexes for transform [" + transformId + "]"), - new MockDataFrameAuditor.SeenAuditExpectation("info about adds/removal", + new MockTransformAuditor.SeenAuditExpectation("info about adds/removal", org.elasticsearch.xpack.core.common.notifications.Level.INFO, transformId, "Source index resolve found more than 10 changes, [50] removed indexes, [50] new indexes"), @@ -175,13 +175,13 @@ public class DefaultCheckpointProviderTests extends ESTestCase { mockLogAppender.addExpectation(loggingExpectation); // always start fresh - dataFrameAuditor.reset(); - dataFrameAuditor.addExpectation(auditExpectation); + transformAuditor.reset(); + transformAuditor.addExpectation(auditExpectation); try { Loggers.addAppender(checkpointProviderlogger, mockLogAppender); codeBlock.run(); mockLogAppender.assertAllExpectationsMatched(); - dataFrameAuditor.assertAllExpectationsMatched(); + transformAuditor.assertAllExpectationsMatched(); } finally { Loggers.removeAppender(checkpointProviderlogger, mockLogAppender); mockLogAppender.stop(); diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/checkpoint/DataFrameTransformCheckpointServiceNodeTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/checkpoint/TransformCheckpointServiceNodeTests.java similarity index 75% rename from x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/checkpoint/DataFrameTransformCheckpointServiceNodeTests.java rename to x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/checkpoint/TransformCheckpointServiceNodeTests.java index ad45ab61772..300ad34e5cb 100644 --- a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/checkpoint/DataFrameTransformCheckpointServiceNodeTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/checkpoint/TransformCheckpointServiceNodeTests.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.dataframe.checkpoint; +package org.elasticsearch.xpack.transform.checkpoint; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; @@ -39,17 +39,17 @@ import org.elasticsearch.index.store.StoreStats; import org.elasticsearch.index.warmer.WarmerStats; import org.elasticsearch.search.suggest.completion.CompletionStats; import org.elasticsearch.test.client.NoOpClient; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerPosition; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerPositionTests; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpoint; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpointStats; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpointingInfo; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfigTests; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformProgress; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformProgressTests; -import org.elasticsearch.xpack.dataframe.DataFrameSingleNodeTestCase; -import org.elasticsearch.xpack.dataframe.notifications.DataFrameAuditor; -import org.elasticsearch.xpack.dataframe.persistence.DataFrameTransformsConfigManager; +import org.elasticsearch.xpack.core.transform.transforms.TransformIndexerPosition; +import org.elasticsearch.xpack.core.transform.transforms.TransformIndexerPositionTests; +import org.elasticsearch.xpack.core.transform.transforms.TransformCheckpoint; +import org.elasticsearch.xpack.core.transform.transforms.TransformCheckpointStats; +import org.elasticsearch.xpack.core.transform.transforms.TransformCheckpointingInfo; +import org.elasticsearch.xpack.core.transform.transforms.TransformConfigTests; +import org.elasticsearch.xpack.core.transform.transforms.TransformProgress; +import org.elasticsearch.xpack.core.transform.transforms.TransformProgressTests; +import org.elasticsearch.xpack.transform.TransformSingleNodeTestCase; +import org.elasticsearch.xpack.transform.notifications.TransformAuditor; +import org.elasticsearch.xpack.transform.persistence.TransformConfigManager; import org.junit.AfterClass; import org.junit.Before; @@ -65,15 +65,15 @@ import java.util.Set; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; -public class DataFrameTransformCheckpointServiceNodeTests extends DataFrameSingleNodeTestCase { +public class TransformCheckpointServiceNodeTests extends TransformSingleNodeTestCase { // re-use the mock client for the whole test suite as the underlying thread pool and the // corresponding context if recreated cause unreliable test execution // see https://github.com/elastic/elasticsearch/issues/45238 and https://github.com/elastic/elasticsearch/issues/42577 private static MockClientForCheckpointing mockClientForCheckpointing = null; - private DataFrameTransformsConfigManager transformsConfigManager; - private DataFrameTransformsCheckpointService transformsCheckpointService; + private TransformConfigManager transformsConfigManager; + private TransformCheckpointService transformsCheckpointService; private class MockClientForCheckpointing extends NoOpClient { @@ -127,14 +127,14 @@ public class DataFrameTransformCheckpointServiceNodeTests extends DataFrameSingl public void createComponents() { // it's not possible to run it as @BeforeClass as clients aren't initialized if 
(mockClientForCheckpointing == null) { - mockClientForCheckpointing = new MockClientForCheckpointing("DataFrameTransformCheckpointServiceNodeTests"); + mockClientForCheckpointing = new MockClientForCheckpointing("TransformCheckpointServiceNodeTests"); } - transformsConfigManager = new DataFrameTransformsConfigManager(client(), xContentRegistry()); + transformsConfigManager = new TransformConfigManager(client(), xContentRegistry()); // use a mock for the checkpoint service - DataFrameAuditor mockAuditor = mock(DataFrameAuditor.class); - transformsCheckpointService = new DataFrameTransformsCheckpointService(mockClientForCheckpointing, + TransformAuditor mockAuditor = mock(TransformAuditor.class); + transformsCheckpointService = new TransformCheckpointService(mockClientForCheckpointing, transformsConfigManager, mockAuditor); } @@ -149,25 +149,25 @@ public class DataFrameTransformCheckpointServiceNodeTests extends DataFrameSingl String transformId = randomAlphaOfLengthBetween(3, 10); long timestamp = 1000; - DataFrameTransformCheckpoint checkpoint = new DataFrameTransformCheckpoint(transformId, timestamp, 1L, + TransformCheckpoint checkpoint = new TransformCheckpoint(transformId, timestamp, 1L, createCheckPointMap(transformId, 10, 10, 10), null); // create transform assertAsync( listener -> transformsConfigManager - .putTransformConfiguration(DataFrameTransformConfigTests.randomDataFrameTransformConfig(transformId), listener), + .putTransformConfiguration(TransformConfigTests.randomTransformConfig(transformId), listener), true, null, null); // by design no exception is thrown but an empty checkpoint is returned assertAsync(listener -> transformsConfigManager.getTransformCheckpoint(transformId, 1L, listener), - DataFrameTransformCheckpoint.EMPTY, null, null); + TransformCheckpoint.EMPTY, null, null); assertAsync(listener -> transformsConfigManager.putTransformCheckpoint(checkpoint, listener), true, null, null); assertAsync(listener -> transformsConfigManager.getTransformCheckpoint(transformId, 1L, listener), checkpoint, null, null); // add a 2nd checkpoint - DataFrameTransformCheckpoint checkpoint2 = new DataFrameTransformCheckpoint(transformId, timestamp + 100L, 2L, + TransformCheckpoint checkpoint2 = new TransformCheckpoint(transformId, timestamp + 100L, 2L, createCheckPointMap(transformId, 20, 20, 20), null); assertAsync(listener -> transformsConfigManager.putTransformCheckpoint(checkpoint2, listener), true, null, null); @@ -181,38 +181,38 @@ public class DataFrameTransformCheckpointServiceNodeTests extends DataFrameSingl // checkpoints should be empty again assertAsync(listener -> transformsConfigManager.getTransformCheckpoint(transformId, 1L, listener), - DataFrameTransformCheckpoint.EMPTY, null, null); + TransformCheckpoint.EMPTY, null, null); assertAsync(listener -> transformsConfigManager.getTransformCheckpoint(transformId, 2L, listener), - DataFrameTransformCheckpoint.EMPTY, null, null); + TransformCheckpoint.EMPTY, null, null); } public void testGetCheckpointStats() throws InterruptedException { String transformId = randomAlphaOfLengthBetween(3, 10); long timestamp = 1000; - DataFrameIndexerPosition position = DataFrameIndexerPositionTests.randomDataFrameIndexerPosition(); - DataFrameTransformProgress progress = DataFrameTransformProgressTests.randomDataFrameTransformProgress(); + TransformIndexerPosition position = TransformIndexerPositionTests.randomTransformIndexerPosition(); + TransformProgress progress = TransformProgressTests.randomTransformProgress(); // create transform 
assertAsync( listener -> transformsConfigManager - .putTransformConfiguration(DataFrameTransformConfigTests.randomDataFrameTransformConfig(transformId), listener), + .putTransformConfiguration(TransformConfigTests.randomTransformConfig(transformId), listener), true, null, null); - DataFrameTransformCheckpoint checkpoint = new DataFrameTransformCheckpoint(transformId, timestamp, 1L, + TransformCheckpoint checkpoint = new TransformCheckpoint(transformId, timestamp, 1L, createCheckPointMap(transformId, 10, 10, 10), null); assertAsync(listener -> transformsConfigManager.putTransformCheckpoint(checkpoint, listener), true, null, null); - DataFrameTransformCheckpoint checkpoint2 = new DataFrameTransformCheckpoint(transformId, timestamp + 100L, 2L, + TransformCheckpoint checkpoint2 = new TransformCheckpoint(transformId, timestamp + 100L, 2L, createCheckPointMap(transformId, 20, 20, 20), null); assertAsync(listener -> transformsConfigManager.putTransformCheckpoint(checkpoint2, listener), true, null, null); mockClientForCheckpointing.setShardStats(createShardStats(createCheckPointMap(transformId, 20, 20, 20))); - DataFrameTransformCheckpointingInfo checkpointInfo = new DataFrameTransformCheckpointingInfo( - new DataFrameTransformCheckpointStats(1, null, null, timestamp, 0L), - new DataFrameTransformCheckpointStats(2, position, progress, timestamp + 100L, 0L), + TransformCheckpointingInfo checkpointInfo = new TransformCheckpointingInfo( + new TransformCheckpointStats(1, null, null, timestamp, 0L), + new TransformCheckpointStats(2, position, progress, timestamp + 100L, 0L), 30L); assertAsync(listener -> @@ -220,9 +220,9 @@ public class DataFrameTransformCheckpointServiceNodeTests extends DataFrameSingl checkpointInfo, null, null); mockClientForCheckpointing.setShardStats(createShardStats(createCheckPointMap(transformId, 10, 50, 33))); - checkpointInfo = new DataFrameTransformCheckpointingInfo( - new DataFrameTransformCheckpointStats(1, null, null, timestamp, 0L), - new DataFrameTransformCheckpointStats(2, position, progress, timestamp + 100L, 0L), + checkpointInfo = new TransformCheckpointingInfo( + new TransformCheckpointStats(1, null, null, timestamp, 0L), + new TransformCheckpointStats(2, position, progress, timestamp + 100L, 0L), 63L); assertAsync(listener -> transformsCheckpointService.getCheckpointingInfo(transformId, 1, position, progress, listener), @@ -230,9 +230,9 @@ public class DataFrameTransformCheckpointServiceNodeTests extends DataFrameSingl // same as current mockClientForCheckpointing.setShardStats(createShardStats(createCheckPointMap(transformId, 10, 10, 10))); - checkpointInfo = new DataFrameTransformCheckpointingInfo( - new DataFrameTransformCheckpointStats(1, null, null, timestamp, 0L), - new DataFrameTransformCheckpointStats(2, position, progress, timestamp + 100L, 0L), + checkpointInfo = new TransformCheckpointingInfo( + new TransformCheckpointStats(1, null, null, timestamp, 0L), + new TransformCheckpointStats(2, position, progress, timestamp + 100L, 0L), 0L); assertAsync(listener -> transformsCheckpointService.getCheckpointingInfo(transformId, 1, position, progress, listener), diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/checkpoint/DataFrameTransformsCheckpointServiceTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/checkpoint/TransformsCheckpointServiceTests.java similarity index 98% rename from 
x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/checkpoint/DataFrameTransformsCheckpointServiceTests.java rename to x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/checkpoint/TransformsCheckpointServiceTests.java index 8a9a5bd50af..d343dd5e006 100644 --- a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/checkpoint/DataFrameTransformsCheckpointServiceTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/checkpoint/TransformsCheckpointServiceTests.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.dataframe.checkpoint; +package org.elasticsearch.xpack.transform.checkpoint; import org.elasticsearch.action.admin.indices.stats.CommonStats; import org.elasticsearch.action.admin.indices.stats.ShardStats; @@ -45,7 +45,7 @@ import java.util.Set; import static org.hamcrest.Matchers.containsString; -public class DataFrameTransformsCheckpointServiceTests extends ESTestCase { +public class TransformsCheckpointServiceTests extends ESTestCase { public void testExtractIndexCheckpoints() { Map expectedCheckpoints = new HashMap<>(); diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/notifications/MockDataFrameAuditor.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/notifications/MockTransformAuditor.java similarity index 96% rename from x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/notifications/MockDataFrameAuditor.java rename to x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/notifications/MockTransformAuditor.java index 41a499aa6e1..7e6a7ba12e7 100644 --- a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/notifications/MockDataFrameAuditor.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/notifications/MockTransformAuditor.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.dataframe.notifications; +package org.elasticsearch.xpack.transform.notifications; import org.elasticsearch.client.Client; import org.elasticsearch.common.regex.Regex; @@ -24,11 +24,11 @@ import static org.mockito.Mockito.mock; * * TODO: ideally this would be a generalized MockAuditor, but the current inheritance doesn't let us */ -public class MockDataFrameAuditor extends DataFrameAuditor { +public class MockTransformAuditor extends TransformAuditor { private List expectations; - public MockDataFrameAuditor() { + public MockTransformAuditor() { super(mock(Client.class), "mock_node_name"); expectations = new CopyOnWriteArrayList<>(); } diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/persistence/SeqNoPrimaryTermAndIndexTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/persistence/SeqNoPrimaryTermAndIndexTests.java similarity index 97% rename from x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/persistence/SeqNoPrimaryTermAndIndexTests.java rename to x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/persistence/SeqNoPrimaryTermAndIndexTests.java index 747c7baf390..63fa5e3a77d 100644 --- a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/persistence/SeqNoPrimaryTermAndIndexTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/persistence/SeqNoPrimaryTermAndIndexTests.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.dataframe.persistence; +package org.elasticsearch.xpack.transform.persistence; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.index.shard.ShardId; diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/persistence/TransformConfigManagerTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/persistence/TransformConfigManagerTests.java new file mode 100644 index 00000000000..c4f1f94144c --- /dev/null +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/persistence/TransformConfigManagerTests.java @@ -0,0 +1,390 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.transform.persistence; + +import org.elasticsearch.ResourceAlreadyExistsException; +import org.elasticsearch.ResourceNotFoundException; +import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; +import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; +import org.elasticsearch.action.get.GetRequest; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.xpack.core.action.util.PageParams; +import org.elasticsearch.xpack.core.transform.TransformMessages; +import org.elasticsearch.xpack.core.transform.transforms.TransformCheckpoint; +import org.elasticsearch.xpack.core.transform.transforms.TransformCheckpointTests; +import org.elasticsearch.xpack.core.transform.transforms.TransformConfig; +import org.elasticsearch.xpack.core.transform.transforms.TransformConfigTests; +import org.elasticsearch.xpack.core.transform.transforms.TransformStoredDoc; +import org.elasticsearch.xpack.core.transform.transforms.TransformStoredDocTests; +import org.elasticsearch.xpack.transform.TransformSingleNodeTestCase; +import org.junit.Before; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.Comparator; +import java.util.List; +import java.util.stream.Collectors; + +import static org.elasticsearch.xpack.transform.persistence.TransformInternalIndex.mappings; +import static org.elasticsearch.xpack.transform.persistence.TransformConfigManager.TO_XCONTENT_PARAMS; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; + +public class TransformConfigManagerTests extends TransformSingleNodeTestCase { + + private TransformConfigManager transformConfigManager; + + @Before + public void createComponents() { + transformConfigManager = new TransformConfigManager(client(), xContentRegistry()); + } + + public void testGetMissingTransform() throws InterruptedException { + // the index does not exist yet + assertAsync(listener -> transformConfigManager.getTransformConfiguration("not_there", listener), (TransformConfig) null, + null, e -> { + assertEquals(ResourceNotFoundException.class, e.getClass()); + assertEquals(TransformMessages.getMessage(TransformMessages.REST_UNKNOWN_TRANSFORM, "not_there"), + e.getMessage()); + }); + + // create one transform and test with an existing index + assertAsync( + listener -> transformConfigManager + .putTransformConfiguration(TransformConfigTests.randomTransformConfig(), listener), + true, null, null); + + // same test, but different code path + assertAsync(listener -> transformConfigManager.getTransformConfiguration("not_there", listener), (TransformConfig) null, + null, e -> { + assertEquals(ResourceNotFoundException.class, e.getClass()); + assertEquals(TransformMessages.getMessage(TransformMessages.REST_UNKNOWN_TRANSFORM, "not_there"), + e.getMessage()); + }); + } + + public void testDeleteMissingTransform() throws InterruptedException { + // the index does not exist yet + assertAsync(listener -> transformConfigManager.deleteTransform("not_there", listener), (Boolean) null, null, e -> { + assertEquals(ResourceNotFoundException.class, 
e.getClass()); + assertEquals(TransformMessages.getMessage(TransformMessages.REST_UNKNOWN_TRANSFORM, "not_there"), e.getMessage()); + }); + + // create one transform and test with an existing index + assertAsync( + listener -> transformConfigManager + .putTransformConfiguration(TransformConfigTests.randomTransformConfig(), listener), + true, null, null); + + // same test, but different code path + assertAsync(listener -> transformConfigManager.deleteTransform("not_there", listener), (Boolean) null, null, e -> { + assertEquals(ResourceNotFoundException.class, e.getClass()); + assertEquals(TransformMessages.getMessage(TransformMessages.REST_UNKNOWN_TRANSFORM, "not_there"), e.getMessage()); + }); + } + + public void testCreateReadDeleteTransform() throws InterruptedException { + TransformConfig transformConfig = TransformConfigTests.randomTransformConfig(); + + // create transform + assertAsync(listener -> transformConfigManager.putTransformConfiguration(transformConfig, listener), true, null, null); + + // read transform + assertAsync(listener -> transformConfigManager.getTransformConfiguration(transformConfig.getId(), listener), transformConfig, null, + null); + + // try to create again + assertAsync(listener -> transformConfigManager.putTransformConfiguration(transformConfig, listener), (Boolean) null, null, e -> { + assertEquals(ResourceAlreadyExistsException.class, e.getClass()); + assertEquals(TransformMessages.getMessage(TransformMessages.REST_PUT_TRANSFORM_EXISTS, transformConfig.getId()), + e.getMessage()); + }); + + // delete transform + assertAsync(listener -> transformConfigManager.deleteTransform(transformConfig.getId(), listener), true, null, null); + + // delete again + assertAsync(listener -> transformConfigManager.deleteTransform(transformConfig.getId(), listener), (Boolean) null, null, e -> { + assertEquals(ResourceNotFoundException.class, e.getClass()); + assertEquals(TransformMessages.getMessage(TransformMessages.REST_UNKNOWN_TRANSFORM, transformConfig.getId()), + e.getMessage()); + }); + + // try to get deleted transform + assertAsync(listener -> transformConfigManager.getTransformConfiguration(transformConfig.getId(), listener), + (TransformConfig) null, null, e -> { + assertEquals(ResourceNotFoundException.class, e.getClass()); + assertEquals(TransformMessages.getMessage(TransformMessages.REST_UNKNOWN_TRANSFORM, transformConfig.getId()), + e.getMessage()); + }); + } + + public void testCreateReadDeleteCheckPoint() throws InterruptedException { + TransformCheckpoint checkpoint = TransformCheckpointTests.randomTransformCheckpoints(); + + // create + assertAsync(listener -> transformConfigManager.putTransformCheckpoint(checkpoint, listener), true, null, null); + + // read + assertAsync(listener -> transformConfigManager.getTransformCheckpoint(checkpoint.getTransformId(), checkpoint.getCheckpoint(), + listener), checkpoint, null, null); + + // delete + assertAsync(listener -> transformConfigManager.deleteTransform(checkpoint.getTransformId(), listener), true, null, null); + + // delete again + assertAsync(listener -> transformConfigManager.deleteTransform(checkpoint.getTransformId(), listener), (Boolean) null, null, e -> { + assertEquals(ResourceNotFoundException.class, e.getClass()); + assertEquals(TransformMessages.getMessage(TransformMessages.REST_UNKNOWN_TRANSFORM, checkpoint.getTransformId()), + e.getMessage()); + }); + + // getting a non-existing checkpoint returns null + assertAsync(listener -> 
transformConfigManager.getTransformCheckpoint(checkpoint.getTransformId(), checkpoint.getCheckpoint(), + listener), TransformCheckpoint.EMPTY, null, null); + } + + public void testExpandIds() throws Exception { + TransformConfig transformConfig1 = TransformConfigTests.randomTransformConfig("transform1_expand"); + TransformConfig transformConfig2 = TransformConfigTests.randomTransformConfig("transform2_expand"); + TransformConfig transformConfig3 = TransformConfigTests.randomTransformConfig("transform3_expand"); + + // create transform + assertAsync(listener -> transformConfigManager.putTransformConfiguration(transformConfig1, listener), true, null, null); + assertAsync(listener -> transformConfigManager.putTransformConfiguration(transformConfig2, listener), true, null, null); + assertAsync(listener -> transformConfigManager.putTransformConfiguration(transformConfig3, listener), true, null, null); + + + // expand 1 id + assertAsync(listener -> + transformConfigManager.expandTransformIds(transformConfig1.getId(), + PageParams.defaultParams(), + true, + listener), + new Tuple<>(1L, Collections.singletonList("transform1_expand")), + null, + null); + + // expand 2 ids explicitly + assertAsync(listener -> + transformConfigManager.expandTransformIds("transform1_expand,transform2_expand", + PageParams.defaultParams(), + true, + listener), + new Tuple<>(2L, Arrays.asList("transform1_expand", "transform2_expand")), + null, + null); + + // expand 3 ids wildcard and explicit + assertAsync(listener -> + transformConfigManager.expandTransformIds("transform1*,transform2_expand,transform3_expand", + PageParams.defaultParams(), + true, + listener), + new Tuple<>(3L, Arrays.asList("transform1_expand", "transform2_expand", "transform3_expand")), + null, + null); + + // expand 3 ids _all + assertAsync(listener -> + transformConfigManager.expandTransformIds("_all", + PageParams.defaultParams(), + true, + listener), + new Tuple<>(3L, Arrays.asList("transform1_expand", "transform2_expand", "transform3_expand")), + null, + null); + + // expand 1 id _all with pagination + assertAsync(listener -> + transformConfigManager.expandTransformIds("_all", + new PageParams(0, 1), + true, + listener), + new Tuple<>(3L, Collections.singletonList("transform1_expand")), + null, + null); + + // expand 2 later ids _all with pagination + assertAsync(listener -> + transformConfigManager.expandTransformIds("_all", + new PageParams(1, 2), + true, + listener), + new Tuple<>(3L, Arrays.asList("transform2_expand", "transform3_expand")), + null, + null); + + // expand 1 id explicitly that does not exist + assertAsync(listener -> + transformConfigManager.expandTransformIds("unknown,unknown2", + new PageParams(1, 2), + true, + listener), + (Tuple>)null, + null, + e -> { + assertThat(e, instanceOf(ResourceNotFoundException.class)); + assertThat(e.getMessage(), + equalTo(TransformMessages.getMessage(TransformMessages.REST_UNKNOWN_TRANSFORM, "unknown,unknown2"))); + }); + + // expand 1 id implicitly that does not exist + assertAsync(listener -> + transformConfigManager.expandTransformIds("unknown*", + new PageParams(1, 2), + false, + listener), + (Tuple>)null, + null, + e -> { + assertThat(e, instanceOf(ResourceNotFoundException.class)); + assertThat(e.getMessage(), + equalTo(TransformMessages.getMessage(TransformMessages.REST_UNKNOWN_TRANSFORM, "unknown*"))); + }); + + } + + public void testStoredDoc() throws InterruptedException { + String transformId = "transform_test_stored_doc_create_read_update"; + + TransformStoredDoc storedDocs = 
TransformStoredDocTests.randomTransformStoredDoc(transformId); + SeqNoPrimaryTermAndIndex firstIndex = new SeqNoPrimaryTermAndIndex(0, 1, TransformInternalIndex.LATEST_INDEX_NAME); + + assertAsync(listener -> transformConfigManager.putOrUpdateTransformStoredDoc(storedDocs, null, listener), + firstIndex, + null, + null); + assertAsync(listener -> transformConfigManager.getTransformStoredDoc(transformId, listener), + Tuple.tuple(storedDocs, firstIndex), + null, + null); + + SeqNoPrimaryTermAndIndex secondIndex = new SeqNoPrimaryTermAndIndex(1, 1, TransformInternalIndex.LATEST_INDEX_NAME); + TransformStoredDoc updated = TransformStoredDocTests.randomTransformStoredDoc(transformId); + assertAsync(listener -> transformConfigManager.putOrUpdateTransformStoredDoc(updated, firstIndex, listener), + secondIndex, + null, + null); + assertAsync(listener -> transformConfigManager.getTransformStoredDoc(transformId, listener), + Tuple.tuple(updated, secondIndex), + null, + null); + + assertAsync(listener -> transformConfigManager.putOrUpdateTransformStoredDoc(updated, firstIndex, listener), + (SeqNoPrimaryTermAndIndex)null, + r -> fail("did not fail with version conflict."), + e -> assertThat( + e.getMessage(), + equalTo("Failed to persist transform statistics for transform [transform_test_stored_doc_create_read_update]")) + ); + } + + public void testGetStoredDocMultiple() throws InterruptedException { + int numStats = randomIntBetween(10, 15); + List expectedDocs = new ArrayList<>(); + for (int i=0; i transformConfigManager.putOrUpdateTransformStoredDoc(stat, null, listener), + initialSeqNo, + null, + null); + } + + // remove one of the put docs so we don't retrieve all + if (expectedDocs.size() > 1) { + expectedDocs.remove(expectedDocs.size() - 1); + } + List ids = expectedDocs.stream().map(TransformStoredDoc::getId).collect(Collectors.toList()); + + // returned docs will be ordered by id + expectedDocs.sort(Comparator.comparing(TransformStoredDoc::getId)); + assertAsync(listener -> transformConfigManager.getTransformStoredDoc(ids, listener), expectedDocs, null, null); + } + + public void testDeleteOldTransformConfigurations() throws Exception { + String oldIndex = TransformInternalIndex.INDEX_PATTERN + "1"; + String transformId = "transform_test_delete_old_configurations"; + String docId = TransformConfig.documentId(transformId); + TransformConfig transformConfig = TransformConfigTests + .randomTransformConfig("transform_test_delete_old_configurations"); + client().admin().indices().create(new CreateIndexRequest(oldIndex) + .mapping(MapperService.SINGLE_MAPPING_NAME, mappings())).actionGet(); + + try(XContentBuilder builder = XContentFactory.jsonBuilder()) { + XContentBuilder source = transformConfig.toXContent(builder, new ToXContent.MapParams(TO_XCONTENT_PARAMS)); + IndexRequest request = new IndexRequest(oldIndex) + .source(source) + .id(docId) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + client().index(request).actionGet(); + } + + assertAsync(listener -> transformConfigManager.putTransformConfiguration(transformConfig, listener), true, null, null); + + assertThat(client().get(new GetRequest(oldIndex).id(docId)).actionGet().isExists(), is(true)); + assertThat(client().get(new GetRequest(TransformInternalIndex.LATEST_INDEX_NAME).id(docId)).actionGet().isExists(), is(true)); + + assertAsync(listener -> transformConfigManager.deleteOldTransformConfigurations(transformId, listener), true, null, null); + + client().admin().indices().refresh(new 
RefreshRequest(TransformInternalIndex.INDEX_NAME_PATTERN)).actionGet(); + assertThat(client().get(new GetRequest(oldIndex).id(docId)).actionGet().isExists(), is(false)); + assertThat(client().get(new GetRequest(TransformInternalIndex.LATEST_INDEX_NAME).id(docId)).actionGet().isExists(), is(true)); + } + + public void testDeleteOldTransformStoredDocuments() throws Exception { + String oldIndex = TransformInternalIndex.INDEX_PATTERN + "1"; + String transformId = "transform_test_delete_old_stored_documents"; + String docId = TransformStoredDoc.documentId(transformId); + TransformStoredDoc transformStoredDoc = TransformStoredDocTests + .randomTransformStoredDoc(transformId); + client().admin().indices().create(new CreateIndexRequest(oldIndex) + .mapping(MapperService.SINGLE_MAPPING_NAME, mappings())).actionGet(); + + try(XContentBuilder builder = XContentFactory.jsonBuilder()) { + XContentBuilder source = transformStoredDoc.toXContent(builder, new ToXContent.MapParams(TO_XCONTENT_PARAMS)); + IndexRequest request = new IndexRequest(oldIndex) + .source(source) + .id(docId); + client().index(request).actionGet(); + } + + // Put when referencing the old index should create the doc in the new index, even if we have seqNo|primaryTerm info + assertAsync(listener -> transformConfigManager.putOrUpdateTransformStoredDoc(transformStoredDoc, + new SeqNoPrimaryTermAndIndex(3, 1, oldIndex), + listener), + new SeqNoPrimaryTermAndIndex(0, 1, TransformInternalIndex.LATEST_INDEX_NAME), + null, + null); + + client().admin().indices().refresh(new RefreshRequest(TransformInternalIndex.INDEX_NAME_PATTERN)).actionGet(); + + assertThat(client().get(new GetRequest(oldIndex).id(docId)).actionGet().isExists(), is(true)); + assertThat(client().get(new GetRequest(TransformInternalIndex.LATEST_INDEX_NAME).id(docId)).actionGet().isExists(), is(true)); + + assertAsync(listener -> transformConfigManager.deleteOldTransformStoredDocuments(transformId, listener), + true, + null, + null); + + client().admin().indices().refresh(new RefreshRequest(TransformInternalIndex.INDEX_NAME_PATTERN)).actionGet(); + assertThat(client().get(new GetRequest(oldIndex).id(docId)).actionGet().isExists(), is(false)); + assertThat(client().get(new GetRequest(TransformInternalIndex.LATEST_INDEX_NAME).id(docId)).actionGet().isExists(), is(true)); + } +} diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/persistence/DataframeIndexTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/persistence/TransformIndexTests.java similarity index 82% rename from x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/persistence/DataframeIndexTests.java rename to x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/persistence/TransformIndexTests.java index 2ff7756c82f..866f2359121 100644 --- a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/persistence/DataframeIndexTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/persistence/TransformIndexTests.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
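[Reviewer note, not part of the patch] testStoredDoc and testDeleteOldTransformStoredDocuments above rely on the seq_no/primary_term pair carried in SeqNoPrimaryTermAndIndex for optimistic concurrency, and the version-conflict path surfaces as the "Failed to persist transform statistics ..." message. The sketch below shows the underlying Elasticsearch mechanism with a plain IndexRequest; the index name, doc id and getter-free parameters are illustration-only assumptions, not the config manager's actual code.

    import org.elasticsearch.action.DocWriteResponse;
    import org.elasticsearch.action.index.IndexRequest;
    import org.elasticsearch.action.index.IndexResponse;
    import org.elasticsearch.client.Client;
    import org.elasticsearch.common.xcontent.XContentType;
    import org.elasticsearch.index.engine.VersionConflictEngineException;

    // Illustrative sketch of conditional (seq_no / primary_term) writes.
    final class ConditionalUpdateSketch {

        static void updateStoredDoc(Client client, String index, String docId, String jsonSource,
                                    long seqNo, long primaryTerm) {
            IndexRequest request = new IndexRequest(index)
                .id(docId)
                .source(jsonSource, XContentType.JSON)
                .setIfSeqNo(seqNo)              // only succeed if the doc still has this seq_no ...
                .setIfPrimaryTerm(primaryTerm); // ... and this primary term
            try {
                IndexResponse response = client.index(request).actionGet();
                assert response.getResult() == DocWriteResponse.Result.CREATED
                    || response.getResult() == DocWriteResponse.Result.UPDATED;
            } catch (VersionConflictEngineException e) {
                // another writer got there first; the caller must re-read and retry
            }
        }
    }
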
*/ -package org.elasticsearch.xpack.dataframe.persistence; +package org.elasticsearch.xpack.transform.persistence; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.create.CreateIndexAction; @@ -13,7 +13,7 @@ import org.elasticsearch.client.Client; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfigTests; +import org.elasticsearch.xpack.core.transform.transforms.TransformConfigTests; import org.mockito.ArgumentCaptor; import java.io.IOException; @@ -32,11 +32,11 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.verifyNoMoreInteractions; -public class DataframeIndexTests extends ESTestCase { +public class TransformIndexTests extends ESTestCase { private static final String TRANSFORM_ID = "some-random-transform-id"; private static final int CURRENT_TIME_MILLIS = 123456789; - private static final String CREATED_BY = "data-frame-transform"; + private static final String CREATED_BY = "transform"; private Client client = mock(Client.class); private Clock clock = Clock.fixed(Instant.ofEpochMilli(CURRENT_TIME_MILLIS), ZoneId.systemDefault()); @@ -51,10 +51,10 @@ public class DataframeIndexTests extends ESTestCase { }) .when(client).execute(any(), any(), any()); - DataframeIndex.createDestinationIndex( + TransformIndex.createDestinationIndex( client, clock, - DataFrameTransformConfigTests.randomDataFrameTransformConfig(TRANSFORM_ID), + TransformConfigTests.randomTransformConfig(TRANSFORM_ID), new HashMap<>(), ActionListener.wrap( value -> assertTrue(value), @@ -67,8 +67,8 @@ public class DataframeIndexTests extends ESTestCase { CreateIndexRequest createIndexRequest = createIndexRequestCaptor.getValue(); try (XContentParser parser = createParser(JsonXContent.jsonXContent, createIndexRequest.mappings().get("_doc"))) { Map map = parser.map(); - assertThat(extractValue("_doc._meta._data_frame.transform", map), equalTo(TRANSFORM_ID)); - assertThat(extractValue("_doc._meta._data_frame.creation_date_in_millis", map), equalTo(CURRENT_TIME_MILLIS)); + assertThat(extractValue("_doc._meta._transform.transform", map), equalTo(TRANSFORM_ID)); + assertThat(extractValue("_doc._meta._transform.creation_date_in_millis", map), equalTo(CURRENT_TIME_MILLIS)); assertThat(extractValue("_doc._meta.created_by", map), equalTo(CREATED_BY)); } } diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/persistence/TransformInternalIndexTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/persistence/TransformInternalIndexTests.java new file mode 100644 index 00000000000..23249ca4fbf --- /dev/null +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/persistence/TransformInternalIndexTests.java @@ -0,0 +1,114 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
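[Reviewer note, not part of the patch] The assertions in TransformIndexTests above pin down the renamed _meta layout of the destination-index mapping: _meta._data_frame.* becomes _meta._transform.* and created_by becomes "transform". A minimal sketch of that structure and of walking it with extractValue (presumably XContentMapValues.extractValue, given the static import style in these tests) follows; the properties section of the real mapping is omitted and the literal values are just the test fixtures.

    import org.elasticsearch.common.xcontent.support.XContentMapValues;

    import java.util.HashMap;
    import java.util.Map;

    // Illustrative only: the _doc._meta shape the renamed test expects.
    public final class DestinationIndexMetaSketch {

        public static void main(String[] args) {
            Map<String, Object> transformMeta = new HashMap<>();
            transformMeta.put("transform", "some-random-transform-id");
            transformMeta.put("creation_date_in_millis", 123456789);

            Map<String, Object> meta = new HashMap<>();
            meta.put("_transform", transformMeta);
            meta.put("created_by", "transform");

            Map<String, Object> doc = new HashMap<>();
            doc.put("_meta", meta);
            // the real mapping also carries a "properties" section with the pivoted fields (omitted)

            Map<String, Object> mapping = new HashMap<>();
            mapping.put("_doc", doc);

            Object transformId = XContentMapValues.extractValue("_doc._meta._transform.transform", mapping);
            Object createdBy = XContentMapValues.extractValue("_doc._meta.created_by", mapping);
            System.out.println(transformId + " / " + createdBy); // some-random-transform-id / transform
        }
    }
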
+ */ + +package org.elasticsearch.xpack.transform.persistence; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.client.AdminClient; +import org.elasticsearch.client.Client; +import org.elasticsearch.client.IndicesAdminClient; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexTemplateMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.util.concurrent.atomic.AtomicBoolean; + +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; +import static org.mockito.Mockito.when; + +public class TransformInternalIndexTests extends ESTestCase { + + public static ClusterState STATE_WITH_LATEST_VERSIONED_INDEX_TEMPLATE; + + static { + ImmutableOpenMap.Builder mapBuilder = ImmutableOpenMap.builder(); + try { + mapBuilder.put(TransformInternalIndex.LATEST_INDEX_VERSIONED_NAME, TransformInternalIndex.getIndexTemplateMetaData()); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + MetaData.Builder metaBuilder = MetaData.builder(); + metaBuilder.templates(mapBuilder.build()); + ClusterState.Builder csBuilder = ClusterState.builder(ClusterName.DEFAULT); + csBuilder.metaData(metaBuilder.build()); + STATE_WITH_LATEST_VERSIONED_INDEX_TEMPLATE = csBuilder.build(); + } + + public void testHaveLatestVersionedIndexTemplate() { + + assertTrue(TransformInternalIndex.haveLatestVersionedIndexTemplate(STATE_WITH_LATEST_VERSIONED_INDEX_TEMPLATE)); + assertFalse(TransformInternalIndex.haveLatestVersionedIndexTemplate(ClusterState.EMPTY_STATE)); + } + + public void testInstallLatestVersionedIndexTemplateIfRequired_GivenNotRequired() { + + ClusterService clusterService = mock(ClusterService.class); + when(clusterService.state()).thenReturn(TransformInternalIndexTests.STATE_WITH_LATEST_VERSIONED_INDEX_TEMPLATE); + + Client client = mock(Client.class); + + AtomicBoolean gotResponse = new AtomicBoolean(false); + ActionListener testListener = ActionListener.wrap(aVoid -> gotResponse.set(true), e -> fail(e.getMessage())); + + TransformInternalIndex.installLatestVersionedIndexTemplateIfRequired(clusterService, client, testListener); + + assertTrue(gotResponse.get()); + verifyNoMoreInteractions(client); + } + + public void testInstallLatestVersionedIndexTemplateIfRequired_GivenRequired() { + + ClusterService clusterService = mock(ClusterService.class); + when(clusterService.state()).thenReturn(ClusterState.EMPTY_STATE); + + IndicesAdminClient indicesClient = mock(IndicesAdminClient.class); + doAnswer( + invocationOnMock -> { + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) invocationOnMock.getArguments()[1]; + listener.onResponse(new AcknowledgedResponse(true)); + return null; + }).when(indicesClient).putTemplate(any(), any()); + + AdminClient adminClient = 
mock(AdminClient.class); + when(adminClient.indices()).thenReturn(indicesClient); + Client client = mock(Client.class); + when(client.admin()).thenReturn(adminClient); + + ThreadPool threadPool = mock(ThreadPool.class); + when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY)); + when(client.threadPool()).thenReturn(threadPool); + + AtomicBoolean gotResponse = new AtomicBoolean(false); + ActionListener testListener = ActionListener.wrap(aVoid -> gotResponse.set(true), e -> fail(e.getMessage())); + + TransformInternalIndex.installLatestVersionedIndexTemplateIfRequired(clusterService, client, testListener); + + assertTrue(gotResponse.get()); + verify(client, times(1)).threadPool(); + verify(client, times(1)).admin(); + verifyNoMoreInteractions(client); + verify(adminClient, times(1)).indices(); + verifyNoMoreInteractions(adminClient); + verify(indicesClient, times(1)).putTemplate(any(), any()); + verifyNoMoreInteractions(indicesClient); + } +} diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/rest/action/RestDeleteDataFrameTransformActionTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/rest/action/RestDeleteTransformActionTests.java similarity index 81% rename from x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/rest/action/RestDeleteDataFrameTransformActionTests.java rename to x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/rest/action/RestDeleteTransformActionTests.java index 6a41ff8cd2c..36891461c27 100644 --- a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/rest/action/RestDeleteDataFrameTransformActionTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/rest/action/RestDeleteTransformActionTests.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. 
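[Reviewer note, not part of the patch] The two install tests above only verify two behaviours: when the latest versioned template is already in cluster state, the listener fires with no client interaction; when it is missing, exactly one putTemplate call is made. The sketch below is a plausible, heavily simplified shape of such a helper under assumed names; the real TransformInternalIndex method also handles origins and thread contexts (the test verifies client.threadPool()), which is omitted here.

    import org.elasticsearch.action.ActionListener;
    import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest;
    import org.elasticsearch.client.Client;
    import org.elasticsearch.cluster.ClusterState;
    import org.elasticsearch.cluster.service.ClusterService;

    // Illustrative sketch only; names and structure are assumptions, not the patch's code.
    final class VersionedTemplateInstallSketch {

        static boolean haveLatestVersionedIndexTemplate(ClusterState state, String templateName) {
            return state.getMetaData().getTemplates().containsKey(templateName);
        }

        static void installIfRequired(ClusterService clusterService, Client client,
                                      String templateName, PutIndexTemplateRequest template,
                                      ActionListener<Void> listener) {
            if (haveLatestVersionedIndexTemplate(clusterService.state(), templateName)) {
                listener.onResponse(null); // already installed: no client interaction, as the first test verifies
                return;
            }
            client.admin().indices().putTemplate(template, ActionListener.wrap(
                acknowledged -> listener.onResponse(null),
                listener::onFailure));
        }
    }
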
*/ -package org.elasticsearch.xpack.dataframe.rest.action; +package org.elasticsearch.xpack.transform.rest.action; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.bytes.BytesArray; @@ -19,10 +19,10 @@ import org.elasticsearch.test.rest.FakeRestRequest; import static org.hamcrest.Matchers.equalTo; import static org.mockito.Mockito.mock; -public class RestDeleteDataFrameTransformActionTests extends ESTestCase { +public class RestDeleteTransformActionTests extends ESTestCase { public void testBodyRejection() throws Exception { - final RestDeleteDataFrameTransformAction handler = new RestDeleteDataFrameTransformAction( + final RestDeleteTransformAction handler = new RestDeleteTransformAction( mock(RestController.class)); try (XContentBuilder builder = JsonXContent.contentBuilder()) { builder.startObject(); @@ -36,7 +36,7 @@ public class RestDeleteDataFrameTransformActionTests extends ESTestCase { IllegalArgumentException e = expectThrows( IllegalArgumentException.class, () -> handler.prepareRequest(request, mock(NodeClient.class))); - assertThat(e.getMessage(), equalTo("delete data frame transforms requests can not have a request body")); + assertThat(e.getMessage(), equalTo("delete transform requests can not have a request body")); } } diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/ClientDataFrameIndexerTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexerTests.java similarity index 69% rename from x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/ClientDataFrameIndexerTests.java rename to x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexerTests.java index e4c0085f9d9..99a454c5f1a 100644 --- a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/ClientDataFrameIndexerTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexerTests.java @@ -4,21 +4,21 @@ * you may not use this file except in compliance with the Elastic License. 
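[Reviewer note, not part of the patch] The only behavioural change in the REST test above is the error string. The guard it exercises is the usual "no request body allowed" check in the handler's prepareRequest; the sketch below shows that kind of check under an assumed structure, not the actual RestDeleteTransformAction code.

    import org.elasticsearch.rest.RestRequest;

    // Illustrative sketch of the body-rejection guard the test drives.
    final class DeleteBodyGuardSketch {

        static void rejectBody(RestRequest restRequest) {
            if (restRequest.hasContent()) {
                throw new IllegalArgumentException("delete transform requests can not have a request body");
            }
        }
    }
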
*/ -package org.elasticsearch.xpack.dataframe.transforms; +package org.elasticsearch.xpack.transform.transforms; import org.elasticsearch.client.Client; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerTransformStats; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransform; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpoint; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; import org.elasticsearch.xpack.core.indexing.IndexerState; import org.elasticsearch.xpack.core.scheduler.SchedulerEngine; -import org.elasticsearch.xpack.dataframe.checkpoint.CheckpointProvider; -import org.elasticsearch.xpack.dataframe.notifications.DataFrameAuditor; -import org.elasticsearch.xpack.dataframe.persistence.DataFrameTransformsConfigManager; +import org.elasticsearch.xpack.core.transform.transforms.TransformIndexerStats; +import org.elasticsearch.xpack.core.transform.transforms.TransformTaskParams; +import org.elasticsearch.xpack.core.transform.transforms.TransformCheckpoint; +import org.elasticsearch.xpack.core.transform.transforms.TransformConfig; +import org.elasticsearch.xpack.transform.checkpoint.CheckpointProvider; +import org.elasticsearch.xpack.transform.notifications.TransformAuditor; +import org.elasticsearch.xpack.transform.persistence.TransformConfigManager; import java.time.Instant; import java.util.Collections; @@ -31,38 +31,38 @@ import java.util.stream.IntStream; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; -public class ClientDataFrameIndexerTests extends ESTestCase { +public class ClientTransformIndexerTests extends ESTestCase { public void testAudiOnFinishFrequency() { ThreadPool threadPool = mock(ThreadPool.class); when(threadPool.executor("generic")).thenReturn(mock(ExecutorService.class)); - DataFrameTransformTask parentTask = new DataFrameTransformTask(1, - "dataframe", + TransformTask parentTask = new TransformTask(1, + "transform", "ptask", - new TaskId("dataframe:1"), - mock(DataFrameTransform.class), + new TaskId("transform:1"), + mock(TransformTaskParams.class), null, mock(SchedulerEngine.class), - mock(DataFrameAuditor.class), + mock(TransformAuditor.class), threadPool, Collections.emptyMap()); - ClientDataFrameIndexer indexer = new ClientDataFrameIndexer( - mock(DataFrameTransformsConfigManager.class), + ClientTransformIndexer indexer = new ClientTransformIndexer( + mock(TransformConfigManager.class), mock(CheckpointProvider.class), new AtomicReference<>(IndexerState.STOPPED), null, mock(Client.class), - mock(DataFrameAuditor.class), - mock(DataFrameIndexerTransformStats.class), - mock(DataFrameTransformConfig.class), + mock(TransformAuditor.class), + mock(TransformIndexerStats.class), + mock(TransformConfig.class), Collections.emptyMap(), null, - new DataFrameTransformCheckpoint("transform", + new TransformCheckpoint("transform", Instant.now().toEpochMilli(), 0L, Collections.emptyMap(), Instant.now().toEpochMilli()), - new DataFrameTransformCheckpoint("transform", + new TransformCheckpoint("transform", Instant.now().toEpochMilli(), 2L, Collections.emptyMap(), diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/IDGeneratorTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/IDGeneratorTests.java similarity index 97% 
rename from x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/IDGeneratorTests.java rename to x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/IDGeneratorTests.java index 3ce5dd81558..68cdcb2ee15 100644 --- a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/IDGeneratorTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/IDGeneratorTests.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.dataframe.transforms; +package org.elasticsearch.xpack.transform.transforms; import org.elasticsearch.test.ESTestCase; diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/SourceDestValidatorTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/SourceDestValidatorTests.java similarity index 83% rename from x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/SourceDestValidatorTests.java rename to x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/SourceDestValidatorTests.java index c9f4a0bc06b..add6a0c27b9 100644 --- a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/SourceDestValidatorTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/SourceDestValidatorTests.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.dataframe.transforms; +package org.elasticsearch.xpack.transform.transforms; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.Version; @@ -17,10 +17,10 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; -import org.elasticsearch.xpack.core.dataframe.transforms.DestConfig; -import org.elasticsearch.xpack.core.dataframe.transforms.SourceConfig; -import org.elasticsearch.xpack.core.dataframe.transforms.pivot.PivotConfigTests; +import org.elasticsearch.xpack.core.transform.transforms.TransformConfig; +import org.elasticsearch.xpack.core.transform.transforms.DestConfig; +import org.elasticsearch.xpack.core.transform.transforms.SourceConfig; +import org.elasticsearch.xpack.core.transform.transforms.pivot.PivotConfigTests; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_CREATION_DATE; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; @@ -64,12 +64,12 @@ public class SourceDestValidatorTests extends ESTestCase { } public void testCheck_GivenSimpleSourceIndexAndValidDestIndex() { - DataFrameTransformConfig config = createDataFrameTransform(new SourceConfig(SOURCE_1), new DestConfig("dest", null)); + TransformConfig config = createTransform(new SourceConfig(SOURCE_1), new DestConfig("dest", null)); SourceDestValidator.validate(config, CLUSTER_STATE, new IndexNameExpressionResolver(), false); } public void testCheck_GivenMissingConcreteSourceIndex() { - DataFrameTransformConfig config = createDataFrameTransform(new SourceConfig("missing"), new DestConfig("dest", null)); + TransformConfig 
config = createTransform(new SourceConfig("missing"), new DestConfig("dest", null)); ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, () -> SourceDestValidator.validate(config, CLUSTER_STATE, new IndexNameExpressionResolver(), false)); @@ -79,7 +79,7 @@ public class SourceDestValidatorTests extends ESTestCase { } public void testCheck_GivenMissingWildcardSourceIndex() { - DataFrameTransformConfig config = createDataFrameTransform(new SourceConfig("missing*"), new DestConfig("dest", null)); + TransformConfig config = createTransform(new SourceConfig("missing*"), new DestConfig("dest", null)); ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, () -> SourceDestValidator.validate(config, CLUSTER_STATE, new IndexNameExpressionResolver(), false)); @@ -89,7 +89,7 @@ public class SourceDestValidatorTests extends ESTestCase { } public void testCheck_GivenDestIndexSameAsSourceIndex() { - DataFrameTransformConfig config = createDataFrameTransform(new SourceConfig(SOURCE_1), new DestConfig("source-1", null)); + TransformConfig config = createTransform(new SourceConfig(SOURCE_1), new DestConfig("source-1", null)); ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, () -> SourceDestValidator.validate(config, CLUSTER_STATE, new IndexNameExpressionResolver(), false)); @@ -99,7 +99,7 @@ public class SourceDestValidatorTests extends ESTestCase { } public void testCheck_GivenDestIndexMatchesSourceIndex() { - DataFrameTransformConfig config = createDataFrameTransform(new SourceConfig("source-*"), new DestConfig(SOURCE_2, null)); + TransformConfig config = createTransform(new SourceConfig("source-*"), new DestConfig(SOURCE_2, null)); ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, () -> SourceDestValidator.validate(config, CLUSTER_STATE, new IndexNameExpressionResolver(), false)); @@ -109,7 +109,7 @@ public class SourceDestValidatorTests extends ESTestCase { } public void testCheck_GivenDestIndexMatchesOneOfSourceIndices() { - DataFrameTransformConfig config = createDataFrameTransform(new SourceConfig("source-1", "source-*"), + TransformConfig config = createTransform(new SourceConfig("source-1", "source-*"), new DestConfig(SOURCE_2, null)); ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, @@ -120,7 +120,7 @@ public class SourceDestValidatorTests extends ESTestCase { } public void testCheck_GivenDestIndexIsAliasThatMatchesMultipleIndices() { - DataFrameTransformConfig config = createDataFrameTransform(new SourceConfig(SOURCE_1), new DestConfig("dest-alias", null)); + TransformConfig config = createTransform(new SourceConfig(SOURCE_1), new DestConfig("dest-alias", null)); ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, () -> SourceDestValidator.validate(config, CLUSTER_STATE, new IndexNameExpressionResolver(), false)); @@ -136,7 +136,7 @@ public class SourceDestValidatorTests extends ESTestCase { } public void testCheck_GivenDestIndexIsAliasThatIsIncludedInSource() { - DataFrameTransformConfig config = createDataFrameTransform(new SourceConfig(SOURCE_1), new DestConfig("source-1-alias", null)); + TransformConfig config = createTransform(new SourceConfig(SOURCE_1), new DestConfig("source-1-alias", null)); ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, () -> SourceDestValidator.validate(config, CLUSTER_STATE, new IndexNameExpressionResolver(), false)); @@ -147,8 +147,8 @@ public class 
SourceDestValidatorTests extends ESTestCase { SourceDestValidator.validate(config, CLUSTER_STATE, new IndexNameExpressionResolver(), true); } - private static DataFrameTransformConfig createDataFrameTransform(SourceConfig sourceConfig, DestConfig destConfig) { - return new DataFrameTransformConfig("test", + private static TransformConfig createTransform(SourceConfig sourceConfig, DestConfig destConfig) { + return new TransformConfig("test", sourceConfig, destConfig, TimeValue.timeValueSeconds(60), diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameIndexerTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerTests.java similarity index 82% rename from x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameIndexerTests.java rename to x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerTests.java index 7de80b12a03..528c49eb1da 100644 --- a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameIndexerTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerTests.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.dataframe.transforms; +package org.elasticsearch.xpack.transform.transforms; import org.apache.lucene.search.TotalHits; import org.elasticsearch.action.ActionListener; @@ -27,17 +27,17 @@ import org.elasticsearch.search.profile.SearchProfileShardResults; import org.elasticsearch.search.suggest.Suggest; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerPosition; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerTransformStats; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpoint; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; -import org.elasticsearch.xpack.core.dataframe.transforms.pivot.AggregationConfigTests; -import org.elasticsearch.xpack.core.dataframe.transforms.pivot.GroupConfigTests; -import org.elasticsearch.xpack.core.dataframe.transforms.pivot.PivotConfig; import org.elasticsearch.xpack.core.indexing.IndexerState; import org.elasticsearch.xpack.core.indexing.IterationResult; -import org.elasticsearch.xpack.dataframe.notifications.DataFrameAuditor; -import org.elasticsearch.xpack.dataframe.transforms.pivot.Pivot; +import org.elasticsearch.xpack.core.transform.transforms.TransformIndexerPosition; +import org.elasticsearch.xpack.core.transform.transforms.TransformIndexerStats; +import org.elasticsearch.xpack.core.transform.transforms.TransformCheckpoint; +import org.elasticsearch.xpack.core.transform.transforms.TransformConfig; +import org.elasticsearch.xpack.core.transform.transforms.pivot.AggregationConfigTests; +import org.elasticsearch.xpack.core.transform.transforms.pivot.GroupConfigTests; +import org.elasticsearch.xpack.core.transform.transforms.pivot.PivotConfig; +import org.elasticsearch.xpack.transform.notifications.TransformAuditor; +import org.elasticsearch.xpack.transform.transforms.pivot.Pivot; import org.junit.Before; import java.io.PrintWriter; @@ -52,8 +52,8 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; import java.util.function.Function; 
-import static org.elasticsearch.xpack.core.dataframe.transforms.DestConfigTests.randomDestConfig; -import static org.elasticsearch.xpack.core.dataframe.transforms.SourceConfigTests.randomSourceConfig; +import static org.elasticsearch.xpack.core.transform.transforms.DestConfigTests.randomDestConfig; +import static org.elasticsearch.xpack.core.transform.transforms.SourceConfigTests.randomSourceConfig; import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.CoreMatchers.nullValue; import static org.hamcrest.Matchers.empty; @@ -65,11 +65,11 @@ import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; -public class DataFrameIndexerTests extends ESTestCase { +public class TransformIndexerTests extends ESTestCase { private Client client; - class MockedDataFrameIndexer extends DataFrameIndexer { + class MockedTransformIndexer extends TransformIndexer { private final Function searchFunction; private final Function bulkFunction; @@ -78,19 +78,19 @@ public class DataFrameIndexerTests extends ESTestCase { // used for synchronizing with the test private CountDownLatch latch; - MockedDataFrameIndexer( + MockedTransformIndexer( Executor executor, - DataFrameTransformConfig transformConfig, + TransformConfig transformConfig, Map fieldMappings, - DataFrameAuditor auditor, + TransformAuditor auditor, AtomicReference initialState, - DataFrameIndexerPosition initialPosition, - DataFrameIndexerTransformStats jobStats, + TransformIndexerPosition initialPosition, + TransformIndexerStats jobStats, Function searchFunction, Function bulkFunction, Consumer failureConsumer) { super(executor, auditor, transformConfig, fieldMappings, initialState, initialPosition, jobStats, - /* DataFrameTransformProgress */ null, DataFrameTransformCheckpoint.EMPTY, DataFrameTransformCheckpoint.EMPTY); + /* TransformProgress */ null, TransformCheckpoint.EMPTY, TransformCheckpoint.EMPTY); this.searchFunction = searchFunction; this.bulkFunction = bulkFunction; this.failureConsumer = failureConsumer; @@ -101,8 +101,8 @@ public class DataFrameIndexerTests extends ESTestCase { } @Override - protected void createCheckpoint(ActionListener listener) { - listener.onResponse(DataFrameTransformCheckpoint.EMPTY); + protected void createCheckpoint(ActionListener listener) { + listener.onResponse(TransformCheckpoint.EMPTY); } @Override @@ -145,7 +145,7 @@ public class DataFrameIndexerTests extends ESTestCase { } @Override - protected void doSaveState(IndexerState state, DataFrameIndexerPosition position, Runnable next) { + protected void doSaveState(IndexerState state, TransformIndexerPosition position, Runnable next) { assert state == IndexerState.STARTED || state == IndexerState.INDEXING || state == IndexerState.STOPPED; next.run(); } @@ -153,7 +153,7 @@ public class DataFrameIndexerTests extends ESTestCase { @Override protected void onFailure(Exception exc) { try { - // mimic same behavior as {@link DataFrameTransformTask} + // mimic same behavior as {@link TransformTask} if (handleCircuitBreakingException(exc)) { return; } @@ -197,7 +197,7 @@ public class DataFrameIndexerTests extends ESTestCase { public void testPageSizeAdapt() throws InterruptedException { Integer pageSize = randomBoolean() ? 
null : randomIntBetween(500, 10_000); - DataFrameTransformConfig config = new DataFrameTransformConfig(randomAlphaOfLength(10), + TransformConfig config = new TransformConfig(randomAlphaOfLength(10), randomSourceConfig(), randomDestConfig(), null, @@ -223,10 +223,10 @@ public class DataFrameIndexerTests extends ESTestCase { final ExecutorService executor = Executors.newFixedThreadPool(1); try { - DataFrameAuditor auditor = new DataFrameAuditor(client, "node_1"); + TransformAuditor auditor = new TransformAuditor(client, "node_1"); - MockedDataFrameIndexer indexer = new MockedDataFrameIndexer(executor, config, Collections.emptyMap(), auditor, state, null, - new DataFrameIndexerTransformStats(), searchFunction, bulkFunction, failureConsumer); + MockedTransformIndexer indexer = new MockedTransformIndexer(executor, config, Collections.emptyMap(), auditor, state, null, + new TransformIndexerStats(), searchFunction, bulkFunction, failureConsumer); final CountDownLatch latch = indexer.newLatch(1); indexer.start(); assertThat(indexer.getState(), equalTo(IndexerState.STARTED)); @@ -236,7 +236,7 @@ public class DataFrameIndexerTests extends ESTestCase { awaitBusy(() -> indexer.getState() == IndexerState.STOPPED); long pageSizeAfterFirstReduction = indexer.getPageSize(); assertThat(initialPageSize, greaterThan(pageSizeAfterFirstReduction)); - assertThat(pageSizeAfterFirstReduction, greaterThan((long)DataFrameIndexer.MINIMUM_PAGE_SIZE)); + assertThat(pageSizeAfterFirstReduction, greaterThan((long)TransformIndexer.MINIMUM_PAGE_SIZE)); // run indexer a 2nd time final CountDownLatch secondRunLatch = indexer.newLatch(1); @@ -250,7 +250,7 @@ public class DataFrameIndexerTests extends ESTestCase { // assert that page size has been reduced again assertThat(pageSizeAfterFirstReduction, greaterThan((long)indexer.getPageSize())); - assertThat(pageSizeAfterFirstReduction, greaterThan((long)DataFrameIndexer.MINIMUM_PAGE_SIZE)); + assertThat(pageSizeAfterFirstReduction, greaterThan((long)TransformIndexer.MINIMUM_PAGE_SIZE)); } finally { executor.shutdownNow(); @@ -259,7 +259,7 @@ public class DataFrameIndexerTests extends ESTestCase { public void testDoProcessAggNullCheck() { Integer pageSize = randomBoolean() ? 
null : randomIntBetween(500, 10_000); - DataFrameTransformConfig config = new DataFrameTransformConfig(randomAlphaOfLength(10), + TransformConfig config = new TransformConfig(randomAlphaOfLength(10), randomSourceConfig(), randomDestConfig(), null, @@ -288,12 +288,12 @@ public class DataFrameIndexerTests extends ESTestCase { final ExecutorService executor = Executors.newFixedThreadPool(1); try { - DataFrameAuditor auditor = mock(DataFrameAuditor.class); + TransformAuditor auditor = mock(TransformAuditor.class); - MockedDataFrameIndexer indexer = new MockedDataFrameIndexer(executor, config, Collections.emptyMap(), auditor, state, null, - new DataFrameIndexerTransformStats(), searchFunction, bulkFunction, failureConsumer); + MockedTransformIndexer indexer = new MockedTransformIndexer(executor, config, Collections.emptyMap(), auditor, state, null, + new TransformIndexerStats(), searchFunction, bulkFunction, failureConsumer); - IterationResult newPosition = indexer.doProcess(searchResponse); + IterationResult newPosition = indexer.doProcess(searchResponse); assertThat(newPosition.getToIndex(), is(empty())); assertThat(newPosition.getPosition(), is(nullValue())); assertThat(newPosition.isDone(), is(true)); diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformPersistentTasksExecutorTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformPersistentTasksExecutorTests.java similarity index 75% rename from x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformPersistentTasksExecutorTests.java rename to x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformPersistentTasksExecutorTests.java index ee30609e1a5..2b51d0f28a2 100644 --- a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformPersistentTasksExecutorTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformPersistentTasksExecutorTests.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. 
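[Reviewer note, not part of the patch] testDoProcessAggNullCheck above asserts that a search response without aggregations ends the iteration gracefully: empty toIndex, null position, done set to true. The sketch below shows that kind of guard using the IterationResult shape the assertions imply; it is an assumption about structure, heavily simplified, not the indexer's real doProcess.

    import org.elasticsearch.action.search.SearchResponse;
    import org.elasticsearch.search.aggregations.Aggregations;
    import org.elasticsearch.xpack.core.indexing.IterationResult;
    import org.elasticsearch.xpack.core.transform.transforms.TransformIndexerPosition;

    import java.util.Collections;

    // Illustrative sketch only: finish the iteration when the response carries no
    // aggregations instead of dereferencing null.
    final class NullAggregationsGuardSketch {

        static IterationResult<TransformIndexerPosition> doProcess(SearchResponse searchResponse) {
            Aggregations aggregations = searchResponse.getAggregations();
            if (aggregations == null) {
                // nothing to index, no new position, and this run is considered done
                return new IterationResult<>(Collections.emptyList(), null, true);
            }
            // normal processing of the composite aggregation response would go here
            throw new UnsupportedOperationException("illustration only");
        }
    }
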
*/ -package org.elasticsearch.xpack.dataframe.transforms; +package org.elasticsearch.xpack.transform.transforms; import org.elasticsearch.Version; import org.elasticsearch.client.Client; @@ -29,12 +29,13 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransform; import org.elasticsearch.xpack.core.scheduler.SchedulerEngine; -import org.elasticsearch.xpack.dataframe.checkpoint.DataFrameTransformsCheckpointService; -import org.elasticsearch.xpack.dataframe.notifications.DataFrameAuditor; -import org.elasticsearch.xpack.dataframe.persistence.DataFrameInternalIndex; -import org.elasticsearch.xpack.dataframe.persistence.DataFrameTransformsConfigManager; +import org.elasticsearch.xpack.core.transform.transforms.TransformTaskParams; +import org.elasticsearch.xpack.transform.checkpoint.TransformCheckpointService; +import org.elasticsearch.xpack.transform.notifications.TransformAuditor; +import org.elasticsearch.xpack.transform.persistence.TransformInternalIndex; +import org.elasticsearch.xpack.transform.persistence.TransformInternalIndexTests; +import org.elasticsearch.xpack.transform.persistence.TransformConfigManager; import java.util.ArrayList; import java.util.Arrays; @@ -46,24 +47,24 @@ import static org.hamcrest.Matchers.equalTo; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; -public class DataFrameTransformPersistentTasksExecutorTests extends ESTestCase { +public class TransformPersistentTasksExecutorTests extends ESTestCase { public void testNodeVersionAssignment() { MetaData.Builder metaData = MetaData.builder(); RoutingTable.Builder routingTable = RoutingTable.builder(); addIndices(metaData, routingTable); PersistentTasksCustomMetaData.Builder pTasksBuilder = PersistentTasksCustomMetaData.builder() - .addTask("data-frame-task-1", - DataFrameTransform.NAME, - new DataFrameTransform("data-frame-task-1", Version.CURRENT, null), + .addTask("transform-task-1", + TransformTaskParams.NAME, + new TransformTaskParams("transform-task-1", Version.CURRENT, null), new PersistentTasksCustomMetaData.Assignment("current-data-node-with-1-tasks", "")) - .addTask("data-frame-task-2", - DataFrameTransform.NAME, - new DataFrameTransform("data-frame-task-2", Version.CURRENT, null), + .addTask("transform-task-2", + TransformTaskParams.NAME, + new TransformTaskParams("transform-task-2", Version.CURRENT, null), new PersistentTasksCustomMetaData.Assignment("current-data-node-with-2-tasks", "")) - .addTask("data-frame-task-3", - DataFrameTransform.NAME, - new DataFrameTransform("data-frame-task-3", Version.CURRENT, null), + .addTask("transform-task-3", + TransformTaskParams.NAME, + new TransformTaskParams("transform-task-3", Version.CURRENT, null), new PersistentTasksCustomMetaData.Assignment("current-data-node-with-2-tasks", "")); PersistentTasksCustomMetaData pTasks = pTasksBuilder.build(); @@ -99,25 +100,26 @@ public class DataFrameTransformPersistentTasksExecutorTests extends ESTestCase { ClusterState cs = csBuilder.build(); Client client = mock(Client.class); - DataFrameAuditor mockAuditor = mock(DataFrameAuditor.class); - DataFrameTransformsConfigManager transformsConfigManager = new DataFrameTransformsConfigManager(client, xContentRegistry()); - DataFrameTransformsCheckpointService dataFrameTransformsCheckpointService = new 
DataFrameTransformsCheckpointService(client, + TransformAuditor mockAuditor = mock(TransformAuditor.class); + TransformConfigManager transformsConfigManager = new TransformConfigManager(client, xContentRegistry()); + TransformCheckpointService transformCheckpointService = new TransformCheckpointService(client, transformsConfigManager, mockAuditor); ClusterSettings cSettings = new ClusterSettings(Settings.EMPTY, - Collections.singleton(DataFrameTransformTask.NUM_FAILURE_RETRIES_SETTING)); + Collections.singleton(TransformTask.NUM_FAILURE_RETRIES_SETTING)); ClusterService clusterService = mock(ClusterService.class); when(clusterService.getClusterSettings()).thenReturn(cSettings); - DataFrameTransformPersistentTasksExecutor executor = new DataFrameTransformPersistentTasksExecutor(client, + when(clusterService.state()).thenReturn(TransformInternalIndexTests.STATE_WITH_LATEST_VERSIONED_INDEX_TEMPLATE); + TransformPersistentTasksExecutor executor = new TransformPersistentTasksExecutor(client, transformsConfigManager, - dataFrameTransformsCheckpointService, mock(SchedulerEngine.class), - new DataFrameAuditor(client, ""), + transformCheckpointService, mock(SchedulerEngine.class), + new TransformAuditor(client, ""), mock(ThreadPool.class), clusterService, Settings.EMPTY); - assertThat(executor.getAssignment(new DataFrameTransform("new-task-id", Version.CURRENT, null), cs).getExecutorNode(), + assertThat(executor.getAssignment(new TransformTaskParams("new-task-id", Version.CURRENT, null), cs).getExecutorNode(), equalTo("current-data-node-with-1-tasks")); - assertThat(executor.getAssignment(new DataFrameTransform("new-old-task-id", Version.V_7_2_0, null), cs).getExecutorNode(), + assertThat(executor.getAssignment(new TransformTaskParams("new-old-task-id", Version.V_7_2_0, null), cs).getExecutorNode(), equalTo("past-data-node-1")); } @@ -131,11 +133,11 @@ public class DataFrameTransformPersistentTasksExecutorTests extends ESTestCase { csBuilder.metaData(metaData); ClusterState cs = csBuilder.build(); - assertEquals(0, DataFrameTransformPersistentTasksExecutor.verifyIndicesPrimaryShardsAreActive(cs).size()); + assertEquals(0, TransformPersistentTasksExecutor.verifyIndicesPrimaryShardsAreActive(cs).size()); metaData = new MetaData.Builder(cs.metaData()); routingTable = new RoutingTable.Builder(cs.routingTable()); - String indexToRemove = DataFrameInternalIndex.LATEST_INDEX_NAME; + String indexToRemove = TransformInternalIndex.LATEST_INDEX_NAME; if (randomBoolean()) { routingTable.remove(indexToRemove); } else { @@ -150,15 +152,15 @@ public class DataFrameTransformPersistentTasksExecutorTests extends ESTestCase { csBuilder.routingTable(routingTable.build()); csBuilder.metaData(metaData); - List result = DataFrameTransformPersistentTasksExecutor.verifyIndicesPrimaryShardsAreActive(csBuilder.build()); + List result = TransformPersistentTasksExecutor.verifyIndicesPrimaryShardsAreActive(csBuilder.build()); assertEquals(1, result.size()); assertEquals(indexToRemove, result.get(0)); } private void addIndices(MetaData.Builder metaData, RoutingTable.Builder routingTable) { List indices = new ArrayList<>(); - indices.add(DataFrameInternalIndex.AUDIT_INDEX); - indices.add(DataFrameInternalIndex.LATEST_INDEX_NAME); + indices.add(TransformInternalIndex.AUDIT_INDEX); + indices.add(TransformInternalIndex.LATEST_INDEX_NAME); for (String indexName : indices) { IndexMetaData.Builder indexMetaData = IndexMetaData.builder(indexName); indexMetaData.settings(Settings.builder() diff --git 
a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationResultUtilsTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/AggregationResultUtilsTests.java similarity index 98% rename from x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationResultUtilsTests.java rename to x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/AggregationResultUtilsTests.java index 350ddf19423..a2b878ee7e0 100644 --- a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationResultUtilsTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/AggregationResultUtilsTests.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.dataframe.transforms.pivot; +package org.elasticsearch.xpack.transform.transforms.pivot; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.geo.GeoPoint; @@ -56,9 +56,9 @@ import org.elasticsearch.search.aggregations.pipeline.ParsedSimpleValue; import org.elasticsearch.search.aggregations.pipeline.ParsedStatsBucket; import org.elasticsearch.search.aggregations.pipeline.StatsBucketPipelineAggregationBuilder; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xpack.core.dataframe.DataFrameField; -import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerTransformStats; -import org.elasticsearch.xpack.core.dataframe.transforms.pivot.GroupConfig; +import org.elasticsearch.xpack.core.transform.TransformField; +import org.elasticsearch.xpack.core.transform.transforms.TransformIndexerStats; +import org.elasticsearch.xpack.core.transform.transforms.pivot.GroupConfig; import java.io.IOException; import java.util.Arrays; @@ -717,7 +717,7 @@ public class AggregationResultUtilsTests extends ESTestCase { "value", 122.55), DOC_COUNT, 44) )); - DataFrameIndexerTransformStats stats = new DataFrameIndexerTransformStats(); + TransformIndexerStats stats = new TransformIndexerStats(); Map fieldTypeMap = asStringMap( aggName, "double", @@ -734,14 +734,14 @@ public class AggregationResultUtilsTests extends ESTestCase { Set documentIdsFirstRun = new HashSet<>(); resultFirstRun.forEach(m -> { - documentIdsFirstRun.add((String) m.get(DataFrameField.DOCUMENT_ID_FIELD)); + documentIdsFirstRun.add((String) m.get(TransformField.DOCUMENT_ID_FIELD)); }); assertEquals(4, documentIdsFirstRun.size()); Set documentIdsSecondRun = new HashSet<>(); resultSecondRun.forEach(m -> { - documentIdsSecondRun.add((String) m.get(DataFrameField.DOCUMENT_ID_FIELD)); + documentIdsSecondRun.add((String) m.get(TransformField.DOCUMENT_ID_FIELD)); }); assertEquals(4, documentIdsSecondRun.size()); @@ -945,7 +945,7 @@ public class AggregationResultUtilsTests extends ESTestCase { Map fieldTypeMap, List> expected, long expectedDocCounts) throws IOException { - DataFrameIndexerTransformStats stats = new DataFrameIndexerTransformStats(); + TransformIndexerStats stats = new TransformIndexerStats(); XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); builder.map(input); @@ -959,7 +959,7 @@ public class AggregationResultUtilsTests extends ESTestCase { // remove the document ids and test uniqueness Set documentIds = new HashSet<>(); result.forEach(m -> { - documentIds.add((String) 
m.remove(DataFrameField.DOCUMENT_ID_FIELD)); + documentIds.add((String) m.remove(TransformField.DOCUMENT_ID_FIELD)); }); assertEquals(result.size(), documentIds.size()); @@ -973,7 +973,7 @@ public class AggregationResultUtilsTests extends ESTestCase { Collection pipelineAggregationBuilders, Map input, Map fieldTypeMap, - DataFrameIndexerTransformStats stats) throws IOException { + TransformIndexerStats stats) throws IOException { XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); builder.map(input); diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationsTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/AggregationsTests.java similarity index 98% rename from x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationsTests.java rename to x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/AggregationsTests.java index 12f7f2e6032..ee66d20859f 100644 --- a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationsTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/AggregationsTests.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.dataframe.transforms.pivot; +package org.elasticsearch.xpack.transform.transforms.pivot; import org.elasticsearch.test.ESTestCase; diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/PivotTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/PivotTests.java similarity index 96% rename from x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/PivotTests.java rename to x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/PivotTests.java index 3c26770ebb2..510c19df337 100644 --- a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/PivotTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/PivotTests.java @@ -4,14 +4,14 @@ * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.dataframe.transforms.pivot; +package org.elasticsearch.xpack.transform.transforms.pivot; import org.apache.lucene.search.TotalHits; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.action.ActionType; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchResponseSections; @@ -28,12 +28,12 @@ import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.SearchModule; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.client.NoOpClient; -import org.elasticsearch.xpack.core.dataframe.transforms.QueryConfig; -import org.elasticsearch.xpack.core.dataframe.transforms.SourceConfig; -import org.elasticsearch.xpack.core.dataframe.transforms.pivot.AggregationConfig; -import org.elasticsearch.xpack.core.dataframe.transforms.pivot.GroupConfigTests; -import org.elasticsearch.xpack.core.dataframe.transforms.pivot.PivotConfig; -import org.elasticsearch.xpack.dataframe.transforms.pivot.Aggregations.AggregationType; +import org.elasticsearch.xpack.core.transform.transforms.QueryConfig; +import org.elasticsearch.xpack.core.transform.transforms.SourceConfig; +import org.elasticsearch.xpack.core.transform.transforms.pivot.AggregationConfig; +import org.elasticsearch.xpack.core.transform.transforms.pivot.GroupConfigTests; +import org.elasticsearch.xpack.core.transform.transforms.pivot.PivotConfig; +import org.elasticsearch.xpack.transform.transforms.pivot.Aggregations.AggregationType; import org.junit.After; import org.junit.Before; diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/AsyncTriggerEventConsumer.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/AsyncTriggerEventConsumer.java index bd48534589e..c63b34984f1 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/AsyncTriggerEventConsumer.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/AsyncTriggerEventConsumer.java @@ -5,8 +5,8 @@ */ package org.elasticsearch.xpack.watcher.execution; -import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.xpack.core.watcher.trigger.TriggerEvent; @@ -16,7 +16,7 @@ import java.util.function.Consumer; import static java.util.stream.StreamSupport.stream; public class AsyncTriggerEventConsumer implements Consumer> { - private static final Logger logger = LogManager.getLogger(SyncTriggerEventConsumer.class); + private static final Logger logger = LogManager.getLogger(AsyncTriggerEventConsumer.class); private final ExecutionService executionService; public AsyncTriggerEventConsumer(ExecutionService executionService) { diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/email/EmailSslTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/email/EmailSslTests.java index c4b0b657b9d..70d7f2f6dd5 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/email/EmailSslTests.java +++ 
b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/email/EmailSslTests.java @@ -12,6 +12,8 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.ssl.CertParsingUtils; +import org.elasticsearch.xpack.core.ssl.PemUtils; import org.elasticsearch.xpack.core.ssl.SSLService; import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; import org.elasticsearch.xpack.core.watcher.watch.Payload; @@ -31,7 +33,8 @@ import javax.mail.internet.MimeMessage; import javax.net.ssl.SSLContext; import javax.net.ssl.SSLException; import java.io.IOException; -import java.io.InputStream; +import java.nio.file.Files; +import java.nio.file.Path; import java.security.GeneralSecurityException; import java.security.KeyStore; import java.util.ArrayList; @@ -50,18 +53,26 @@ public class EmailSslTests extends ESTestCase { @Before public void startSmtpServer() throws GeneralSecurityException, IOException { - final KeyStore keyStore = KeyStore.getInstance(KeyStore.getDefaultType()); + // Keystore and private key will share the same password final char[] keystorePassword = "test-smtp".toCharArray(); - try (InputStream is = getDataInputStream("test-smtp.p12")) { - keyStore.load(is, keystorePassword); - } + final Path tempDir = createTempDir(); + final Path certPath = tempDir.resolve("test-smtp.crt"); + final Path keyPath = tempDir.resolve("test-smtp.pem"); + Files.copy(getDataPath("/org/elasticsearch/xpack/watcher/actions/email/test-smtp.crt"), certPath); + Files.copy(getDataPath("/org/elasticsearch/xpack/watcher/actions/email/test-smtp.pem"), keyPath); + KeyStore keyStore = KeyStore.getInstance(KeyStore.getDefaultType()); + keyStore.load(null, keystorePassword); + keyStore.setKeyEntry("test-smtp", PemUtils.readPrivateKey(keyPath, keystorePassword::clone), keystorePassword, + CertParsingUtils.readCertificates(Collections.singletonList(certPath))); final SSLContext sslContext = new SSLContextBuilder().loadKeyMaterial(keyStore, keystorePassword).build(); server = EmailServer.localhost(logger, sslContext); } @After public void stopSmtpServer() { - server.stop(); + if (null != server) { + server.stop(); + } } public void testFailureSendingMessageToSmtpServerWithUntrustedCertificateAuthority() throws Exception { @@ -96,6 +107,7 @@ public class EmailSslTests extends ESTestCase { } public void testCanSendMessageToSmtpServerByDisablingVerification() throws Exception { + assumeFalse("Can't run in a FIPS JVM with verification mode None", inFipsJvm()); List messages = new ArrayList<>(); server.addListener(messages::add); try { diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/ExecutionServiceTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/ExecutionServiceTests.java index d42ddfeb2fe..b326b3aeebf 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/ExecutionServiceTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/ExecutionServiceTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.watcher.execution; import org.elasticsearch.Version; import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.delete.DeleteRequest; import 
org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.get.GetResponse; @@ -1098,7 +1099,8 @@ public class ExecutionServiceTests extends ESTestCase { } PlainActionFuture future = PlainActionFuture.newFuture(); - future.onResponse(new UpdateResponse(null, null, null, null, 0, 0, 0, null)); + future.onResponse(new UpdateResponse(null, new ShardId("test", "test", 0), "_doc", "test", 0, 0, 0, + DocWriteResponse.Result.CREATED)); return future; }).when(client).update(any()); diff --git a/x-pack/plugin/watcher/src/test/resources/org/elasticsearch/xpack/watcher/actions/email/ca.crt b/x-pack/plugin/watcher/src/test/resources/org/elasticsearch/xpack/watcher/actions/email/ca.crt new file mode 100644 index 00000000000..16fce6b7389 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/resources/org/elasticsearch/xpack/watcher/actions/email/ca.crt @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDSTCCAjGgAwIBAgIUWcS0sZGBePVMAYWycyuWzSZYWQswDQYJKoZIhvcNAQEL +BQAwNDEyMDAGA1UEAxMpRWxhc3RpYyBDZXJ0aWZpY2F0ZSBUb29sIEF1dG9nZW5l +cmF0ZWQgQ0EwHhcNMTkwODA3MDUxMDUzWhcNMjIwODA2MDUxMDUzWjA0MTIwMAYD +VQQDEylFbGFzdGljIENlcnRpZmljYXRlIFRvb2wgQXV0b2dlbmVyYXRlZCBDQTCC +ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKqIbuH52X93CF8M7hiCvNVf +HO9qC/I+UBzYVXt03dU9tFTxilgLRNFwC+3O7uxu8P5OH7qUdIiwdLjQ6+5cfA+R +eL9YbSOQBydmk0bH+MK5lJkrdyHZEWSHbI2Urr87aMUmHTGbQoNzzk61XifS4vlS +GcqsoWteV56IbWNyYTu8EC2i7c2ZJS759aTK02dlxpdymfoTC+O1uWIGUBki5Cqe +rKd9dzEVRWLEb6NfhCMUeUQ09TjGVzHjk4RAY+CcNiy3RufDIQ4pUEdiky/vPl/f +Y/oDsFVW2KUVjzKM4dzDuQOe4KxuqQGojfHtPPJFHoYLXQ7TdewF025ns9T7tCUC +AwEAAaNTMFEwHQYDVR0OBBYEFNPZ3LZtYf4LxJ+jDzGts1cJ8kF7MB8GA1UdIwQY +MBaAFNPZ3LZtYf4LxJ+jDzGts1cJ8kF7MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZI +hvcNAQELBQADggEBAFYUrH+epWXc+7dKwerPrPiqjMOEVB6GhrHb6SJQ5qxeeX+Q +P4rRrylk9XEVk3cgH+5SFygYkmXk8heJ2X0vB1cDdgLz47iXI4lrz1n8TOF+lOlM +e9QsoRNp2iCJ/fYXknr38n+z0QsJLLhz5B0dgpd8ASbGir7cG9+DF3R8DmbcTpR7 +tHJA9XTDsJmzFv9reqieP5Kieg1tioaho/qA0XIxzpOIqDKcWOZLtJE5PuMaUSF8 +RwJRVRF5wBZwFpcQwy0E1/rPsWzehtDZ3S5AyME4vsow1M8e5c+YyHpsZcDSdUtB +t0t0BVNDONjm3WlJ1QYryQJOYp8/ZbdVzwpGdVg= +-----END CERTIFICATE----- diff --git a/x-pack/plugin/watcher/src/test/resources/org/elasticsearch/xpack/watcher/actions/email/test-smtp.crt b/x-pack/plugin/watcher/src/test/resources/org/elasticsearch/xpack/watcher/actions/email/test-smtp.crt new file mode 100644 index 00000000000..2ab60c6ed48 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/resources/org/elasticsearch/xpack/watcher/actions/email/test-smtp.crt @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDOjCCAiKgAwIBAgIVAJJCL6+YymqqtgFngOxqkOOiAtx4MA0GCSqGSIb3DQEB +CwUAMDQxMjAwBgNVBAMTKUVsYXN0aWMgQ2VydGlmaWNhdGUgVG9vbCBBdXRvZ2Vu +ZXJhdGVkIENBMB4XDTE5MDgwNzA1MTA1NFoXDTIyMDgwNjA1MTA1NFowFDESMBAG +A1UEAxMJdGVzdC1zbXRwMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA +kawwFDDphZ484SI62BlIfCI/O8w9KRcSvE8ECELkBRxGjeA6ozF7ctw4rp30L7fU +/RDxbX6o3X4uAMCIwixrvn6rbebggl2WrK3ilIF6Cwotny6dg/qbu30WmDJc7sPp +32t+jGlHyx4I3anSu4C7IJaE1fjZlExxgfsgoV/CtsCmIdPM3qABUHPds3iVd8Q/ ++HESn7/ZWjU2AOsL2V5EbM4AHG5ar6d2zyGMxwmASUpjotjC06FI3PeDGrV/rFlX +K1f8ALrnO9oDQQzwxrWrru8CxVNW5BmJp2aAr/0pp5S05+dozHLYhsWNrb70bFfA +dXsnXRLO8H/CakvhhrM8JQIDAQABo2MwYTAdBgNVHQ4EFgQUsS6DitT6Q+dcIjDj +aQGATdwKVmYwHwYDVR0jBBgwFoAU09nctm1h/gvEn6MPMa2zVwnyQXswFAYDVR0R +BA0wC4IJbG9jYWxob3N0MAkGA1UdEwQCMAAwDQYJKoZIhvcNAQELBQADggEBAByS +BEiNYEWPM99ALWhQy2NkbDKev9Wgv1GEdgh040UkZ9zMf/RpV+C/Lp9QlagHH+lc +LNEeWGOFSTexWv+QbPcoCVVMH4H+JpRWqcwH/zG21lx2eEMPJwrZKC8YElDw+D/7 +qJgCSRKm3H/CfQqdPKtKU0vZjtKXHBt8PDOGMO0475rm95sZv6rrOqlY9LpJ7Cm8 
+6o08gnSZpka1ND0HcB13I9L/rsqMsk3clO7r2d10VCCG2A254ElUSjBCFKbWIfh/ +ws/R0OTCd9UnHmlCWjjxoJ8D/1PNefst17WhGCFQLwB1wWRTDyIyZqAVzVyA94sX +tdYxxBNthPY7Z1aEr5o= +-----END CERTIFICATE----- diff --git a/x-pack/plugin/watcher/src/test/resources/org/elasticsearch/xpack/watcher/actions/email/test-smtp.pem b/x-pack/plugin/watcher/src/test/resources/org/elasticsearch/xpack/watcher/actions/email/test-smtp.pem new file mode 100644 index 00000000000..83bc82a5c10 --- /dev/null +++ b/x-pack/plugin/watcher/src/test/resources/org/elasticsearch/xpack/watcher/actions/email/test-smtp.pem @@ -0,0 +1,30 @@ +-----BEGIN RSA PRIVATE KEY----- +Proc-Type: 4,ENCRYPTED +DEK-Info: AES-128-CBC,AD2AC08225DD9EA7A429BB867D62D2D1 + +Hv/myqMGjejCI3OFUSwyykeAvVMccqe/pntxjVjx9S5tqSr+gnfvKiUsDGPnoDeR +qP9dGKMA94oAgfRFTdk1nYOASB2C+fakMRtstK/N8K3sOsTPsh4oo+0RAM+ErN6Z +MFFkY+K9hxrhEeuD19M0ro8/U+KoKcaaSVuLZHfcJiBKBklOHAhPAKzTsS9u1LuJ +YyMPV6MtYxCfgZi+xdxedAPV0hp4eKZBA38fN6aZGR42Tr2e4aOgnFKGAA9lgyGg +TfZeqaLcxpGTkL4vPSptVdDlU3a4kHcskeJ7/FasYdXOfVU09Awcg3kBEnGHpkmO +6PifuRgsJyfvdUgJPw1Kjgh2a2s0spmWfSrwIAbWTrtBHfg7Pcok7EqeJ8KNH4R1 +UBckUbtCfbsE6E+AnTDbQEiZZOcrn8QYPlyztQGUoZUOikBbEdUzfiHdM9FHKjfi +BD7M+NCwaBmAwdyyN1w9qcbRk6VZm35V4hxCHLKWdi3qeLapOES1RL8OZxsiHzyU +nExL6Lgk1A1Mheb7adNjY153ckhiQvzjGfm9yIoCvm43VSWcI5FIJG90Zy8hl4n0 +UuWlJE6LsG3yJUT8wpAlVuqKF6PXeMWOYpWhtpVdUcIXIahHL8wlsTZ4GeXqXqAb +crgjrG1nwIx8y5QGkXPCKIeM7gPWdz6nJdcg+7tqLTC7bS5h9Zsae8f3k4be/lSg +YcALp5kWWcXAM3rglftN+oo6tgPRtoM8XzRf8h+/f/geN69LMD9Ej/u51JbO0Ca6 +6A19jdODnYo7F/YhxeBQ0znill6uGsNp950qvYo/GX1K4/2GsjlKueKFXDaSk+Ov +YkwrYQrNQsFVqwIWp8HgJ5l8pBw+ZpG4Xd/nzZ+5d5C1Z1VUgweDtgrYiGe2MMDK +0/7QgUkmyIOOHsC2vBwOJ28NnGSENol3FJaK+DXDp/kahADlxTztuJNeh2LhTa8t +yRZq9xJsW/jU7wqOlozk8w74F1V4nZCgBfW8i5Jj7OHWPa2HPgIKgogr7VhyOcZx +/xhSLtVK+8QZNHa08D1Opj8HVhtdoV5jaUEX0T2fVKlaFGWsmMHpo7EDHyq0czVH +MkgvuuqRqhN9zu6HmnXSOlXh/ddjkcfz5AKSxX8cKAyto50xpWQwFalb2YGbRY0n +e4khQrSZ2f72qlINXy24uyNsSyX1VADKdlW7lhxgQrLXUujD7biHuhO/XFi3o/9F +E7TPslr7ykLHJ93qofqsigtygClw2svNT560Qnkq82oS7Sf5upVYLPSCeRzZSmwY +d9x1XXHgO+6OqUc7HSE+OHexccEEuqrx+LBFfAVePb2w9AjvK2yq+fmMMBC+cnLx +xAMEntQxQIWzeBqITG1rr/qq1HB7xYQdFl06wOJxiY+jOFHv3Fpd7rghgXfr15ih +7d0S0B/UBi/IDQ1kkTSxr9HxAmXo4EVjpEOohcFV0bt1ypx6YfD4TNxEqF8Z4lh6 +4mJH2LCOJXjiZ4cnjvgzN/g5SMCKw3mrCjB3p+92HNUgy5Am3AXuZBNYeaAmVgeX +L7Lly3CtNJ8jSNNgM92St5GTHA7Gk4Nz/uNAUYxVjDGNpwVieAAbpNRj6TSBCwtL +-----END RSA PRIVATE KEY----- diff --git a/x-pack/qa/openldap-tests/src/test/java/org/elasticsearch/test/OpenLdapTests.java b/x-pack/qa/openldap-tests/src/test/java/org/elasticsearch/test/OpenLdapTests.java index a7f7e13bdd8..61854a5396b 100644 --- a/x-pack/qa/openldap-tests/src/test/java/org/elasticsearch/test/OpenLdapTests.java +++ b/x-pack/qa/openldap-tests/src/test/java/org/elasticsearch/test/OpenLdapTests.java @@ -85,7 +85,7 @@ public class OpenLdapTests extends ESTestCase { Path truststore = getDataPath(LDAPTRUST_PATH); /* * Prior to each test we reinitialize the socket factory with a new SSLService so that we get a new SSLContext. - * If we re-use a SSLContext, previously connected sessions can get re-established which breaks hostname + * If we re-use an SSLContext, previously connected sessions can get re-established which breaks hostname * verification tests since a re-established connection does not perform hostname verification. 
*/ MockSecureSettings mockSecureSettings = new MockSecureSettings(); diff --git a/x-pack/qa/openldap-tests/src/test/java/org/elasticsearch/xpack/security/authc/ldap/OpenLdapUserSearchSessionFactoryTests.java b/x-pack/qa/openldap-tests/src/test/java/org/elasticsearch/xpack/security/authc/ldap/OpenLdapUserSearchSessionFactoryTests.java index 2f732110eec..de1183db193 100644 --- a/x-pack/qa/openldap-tests/src/test/java/org/elasticsearch/xpack/security/authc/ldap/OpenLdapUserSearchSessionFactoryTests.java +++ b/x-pack/qa/openldap-tests/src/test/java/org/elasticsearch/xpack/security/authc/ldap/OpenLdapUserSearchSessionFactoryTests.java @@ -53,7 +53,7 @@ public class OpenLdapUserSearchSessionFactoryTests extends ESTestCase { Path caPath = getDataPath(LDAPCACERT_PATH); /* * Prior to each test we reinitialize the socket factory with a new SSLService so that we get a new SSLContext. - * If we re-use a SSLContext, previously connected sessions can get re-established which breaks hostname + * If we re-use an SSLContext, previously connected sessions can get re-established which breaks hostname * verification tests since a re-established connection does not perform hostname verification. */ globalSettings = Settings.builder() diff --git a/x-pack/qa/rolling-upgrade/build.gradle b/x-pack/qa/rolling-upgrade/build.gradle index 920505a384d..f28de2f464a 100644 --- a/x-pack/qa/rolling-upgrade/build.gradle +++ b/x-pack/qa/rolling-upgrade/build.gradle @@ -160,11 +160,18 @@ for (Version version : bwcVersions.wireCompatible) { oldClusterTestRunner.configure { systemProperty 'tests.rest.suite', 'old_cluster' systemProperty 'tests.upgrade_from_version', version.toString().replace('-SNAPSHOT', '') + + def toBlackList = [] // Dataframe transforms were not added until 7.2.0 if (version.before('7.2.0')) { - systemProperty 'tests.rest.blacklist', [ - 'old_cluster/80_data_frame_jobs_crud/Test put batch data frame transforms on old cluster' - ].join(',') + toBlackList << 'old_cluster/80_data_frame_jobs_crud/Test put batch data frame transforms on old cluster' + } + // continuous Dataframe transforms were not added until 7.3.0 + if (version.before('7.3.0')) { + toBlackList << 'old_cluster/80_data_frame_jobs_crud/Test put continuous data frame transform on old cluster' + } + if (!toBlackList.empty) { + systemProperty 'tests.rest.blacklist', toBlackList.join(',') } } @@ -223,12 +230,17 @@ for (Version version : bwcVersions.wireCompatible) { 'mixed_cluster/30_ml_jobs_crud/Create a job in the mixed cluster and write some data', 'mixed_cluster/40_ml_datafeed_crud/Put job and datafeed without aggs in mixed cluster', 'mixed_cluster/40_ml_datafeed_crud/Put job and datafeed with aggs in mixed cluster', - 'mixed_cluster/80_data_frame_jobs_crud/Test put batch data frame transforms on mixed cluster' + 'mixed_cluster/80_data_frame_jobs_crud/Test put batch data frame transforms on mixed cluster', + 'mixed_cluster/80_data_frame_jobs_crud/Test put continuous data frame transform on mixed cluster' ] // Dataframe transforms were not added until 7.2.0 if (version.before('7.2.0')) { toBlackList << 'mixed_cluster/80_data_frame_jobs_crud/Test GET, start, and stop old cluster batch transforms' } + // continuous Dataframe transforms were not added until 7.3.0 + if (version.before('7.3.0')) { + toBlackList << 'mixed_cluster/80_data_frame_jobs_crud/Test GET, stop, start, old continuous transforms' + } systemProperty 'tests.rest.blacklist', toBlackList.join(',') finalizedBy "${baseName}#oldClusterTestCluster#node1.stop" } @@ -245,13 +257,19 @@ for 
(Version version : bwcVersions.wireCompatible) { systemProperty 'tests.first_round', 'false' systemProperty 'tests.upgrade_from_version', version.toString().replace('-SNAPSHOT', '') finalizedBy "${baseName}#oldClusterTestCluster#node2.stop" + def toBlackList = [] // Dataframe transforms were not added until 7.2.0 if (version.before('7.2.0')) { - systemProperty 'tests.rest.blacklist', [ - 'mixed_cluster/80_data_frame_jobs_crud/Test put batch data frame transforms on mixed cluster', - 'mixed_cluster/80_data_frame_jobs_crud/Test GET, start, and stop old cluster batch transforms' - - ].join(',') + toBlackList << 'mixed_cluster/80_data_frame_jobs_crud/Test put batch data frame transforms on mixed cluster' + toBlackList << 'mixed_cluster/80_data_frame_jobs_crud/Test GET, start, and stop old cluster batch transforms' + } + // continuous Dataframe transforms were not added until 7.3.0 + if (version.before('7.3.0')) { + toBlackList << 'mixed_cluster/80_data_frame_jobs_crud/Test put continuous data frame transform on mixed cluster' + toBlackList << 'mixed_cluster/80_data_frame_jobs_crud/Test GET, stop, start, old continuous transforms' + } + if (!toBlackList.empty) { + systemProperty 'tests.rest.blacklist', toBlackList.join(',') } } @@ -288,6 +306,10 @@ for (Version version : bwcVersions.wireCompatible) { if (version.before('7.2.0')) { toBlackList << 'upgraded_cluster/80_data_frame_jobs_crud/Get start, stop, and delete old and mixed cluster batch data frame transforms' } + // continuous Dataframe transforms were not added until 7.3.0 + if (version.before('7.3.0')) { + toBlackList << 'upgraded_cluster/80_data_frame_jobs_crud/Test GET, stop, delete, old and mixed continuous transforms' + } if (!toBlackList.empty) { systemProperty 'tests.rest.blacklist', toBlackList.join(',') } diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/DataFrameSurvivesUpgradeIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/DataFrameSurvivesUpgradeIT.java index 6ba042a07ba..f77e2d78dee 100644 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/DataFrameSurvivesUpgradeIT.java +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/DataFrameSurvivesUpgradeIT.java @@ -11,15 +11,15 @@ import org.elasticsearch.Version; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.core.IndexerState; -import org.elasticsearch.client.dataframe.GetDataFrameTransformStatsResponse; -import org.elasticsearch.client.dataframe.transforms.DataFrameTransformConfig; -import org.elasticsearch.client.dataframe.transforms.DataFrameTransformStats; -import org.elasticsearch.client.dataframe.transforms.DestConfig; -import org.elasticsearch.client.dataframe.transforms.SourceConfig; -import org.elasticsearch.client.dataframe.transforms.TimeSyncConfig; -import org.elasticsearch.client.dataframe.transforms.pivot.GroupConfig; -import org.elasticsearch.client.dataframe.transforms.pivot.PivotConfig; -import org.elasticsearch.client.dataframe.transforms.pivot.TermsGroupSource; +import org.elasticsearch.client.transform.GetDataFrameTransformStatsResponse; +import org.elasticsearch.client.transform.transforms.DataFrameTransformConfig; +import org.elasticsearch.client.transform.transforms.DataFrameTransformStats; +import org.elasticsearch.client.transform.transforms.DestConfig; +import org.elasticsearch.client.transform.transforms.SourceConfig; +import org.elasticsearch.client.transform.transforms.TimeSyncConfig; 
+import org.elasticsearch.client.transform.transforms.pivot.GroupConfig; +import org.elasticsearch.client.transform.transforms.pivot.PivotConfig; +import org.elasticsearch.client.transform.transforms.pivot.TermsGroupSource; import org.elasticsearch.common.Booleans; import org.elasticsearch.common.Strings; import org.elasticsearch.common.unit.TimeValue; diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/80_data_frame_jobs_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/80_data_frame_jobs_crud.yml index 86a1e6a8daa..f678f36b578 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/80_data_frame_jobs_crud.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/80_data_frame_jobs_crud.yml @@ -31,7 +31,6 @@ - match: { transforms.0.id: "mixed-simple-transform" } # Since we are breaking the stats format between 7.3 and 7.4 (allowed because we're beta) we cannot # assert on state in the mixed cluster as it could be state at the top level or state.task_state - # TODO: uncomment this assertion in master #- match: { transforms.0.state: "/started|indexing|stopping|stopped/" } - do: @@ -47,7 +46,6 @@ - match: { transforms.0.id: "mixed-simple-transform" } # Since we are breaking the stats format between 7.3 and 7.4 (allowed because we're beta) we cannot # assert on state in the mixed cluster as it could be state at the top level or state.task_state - # TODO: uncomment this assertion in master #- match: { transforms.0.state: "stopped" } - do: @@ -94,7 +92,6 @@ - match: { transforms.0.id: "mixed-complex-transform" } # Since we are breaking the stats format between 7.3 and 7.4 (allowed because we're beta) we cannot # assert on state in the mixed cluster as it could be state at the top level or state.task_state - # TODO: uncomment this assertion in master #- match: { transforms.0.state: "/started|indexing|stopping|stopped/" } - do: @@ -110,14 +107,72 @@ - match: { transforms.0.id: "mixed-complex-transform" } # Since we are breaking the stats format between 7.3 and 7.4 (allowed because we're beta) we cannot # assert on state in the mixed cluster as it could be state at the top level or state.task_state - # TODO: uncomment this assertion in master #- match: { transforms.0.state: "stopped" } +--- +"Test put continuous data frame transform on mixed cluster": + - do: + cluster.health: + index: "dataframe-transform-airline-data-cont" + wait_for_status: green + timeout: 70s + + - do: + data_frame.put_data_frame_transform: + transform_id: "mixed-simple-continuous-transform" + body: > + { + "source": { "index": "dataframe-transform-airline-data-cont" }, + "dest": { "index": "mixed-simple-continuous-transform-idx" }, + "pivot": { + "group_by": { "airline": {"terms": {"field": "airline"}}}, + "aggs": {"avg_response": {"avg": {"field": "responsetime"}}} + }, + "sync": { + "time": { + "field": "time", + "delay": "90m" + } + } + } + - match: { acknowledged: true } + + - do: + data_frame.get_data_frame_transform: + transform_id: "mixed-simple-continuous-transform" + - match: { count: 1 } + - match: { transforms.0.id: "mixed-simple-continuous-transform" } + - match: { transforms.0.sync.time.field: "time" } + - match: { transforms.0.sync.time.delay: "90m" } + - is_true: transforms.0.version + - is_true: transforms.0.create_time + + - do: + data_frame.start_data_frame_transform: + transform_id: "mixed-simple-continuous-transform" + - match: { acknowledged: true } + - do: + 
data_frame.get_data_frame_transform_stats: + transform_id: "mixed-simple-continuous-transform" + - match: { count: 1 } + - match: { transforms.0.id: "mixed-simple-continuous-transform" } + - match: { transforms.0.state: "/started|indexing/" } + + - do: + data_frame.stop_data_frame_transform: + transform_id: "mixed-simple-continuous-transform" + wait_for_completion: true + - match: { acknowledged: true } + + - do: + data_frame.get_data_frame_transform_stats: + transform_id: "mixed-simple-continuous-transform" + - match: { count: 1 } + - match: { transforms.0.id: "mixed-simple-continuous-transform" } + - match: { transforms.0.state: "stopped" } + --- "Test GET, start, and stop old cluster batch transforms": - - skip: - reason: "https://github.com/elastic/elasticsearch/issues/44808" - version: "all" - do: cluster.health: index: "dataframe-transform-airline-data" @@ -145,7 +200,6 @@ - match: { transforms.0.id: "old-simple-transform" } # Since we are breaking the stats format between 7.3 and 7.4 (allowed because we're beta) we cannot # assert on state in the mixed cluster as it could be state at the top level or state.task_state - # TODO: uncomment this assertion in master #- match: { transforms.0.state: "/started|indexing|stopping|stopped/" } - do: @@ -160,7 +214,6 @@ - match: { transforms.0.id: "old-simple-transform" } # Since we are breaking the stats format between 7.3 and 7.4 (allowed because we're beta) we cannot # assert on state in the mixed cluster as it could be state at the top level or state.task_state - # TODO: uncomment this assertion in master #- match: { transforms.0.state: "stopped" } - do: @@ -186,7 +239,6 @@ - match: { transforms.0.id: "old-complex-transform" } # Since we are breaking the stats format between 7.3 and 7.4 (allowed because we're beta) we cannot # assert on state in the mixed cluster as it could be state at the top level or state.task_state - # TODO: uncomment this assertion in master #- match: { transforms.0.state: "/started|indexing|stopping|stopped/" } - do: @@ -201,5 +253,46 @@ - match: { transforms.0.id: "old-complex-transform" } # Since we are breaking the stats format between 7.3 and 7.4 (allowed because we're beta) we cannot # assert on state in the mixed cluster as it could be state at the top level or state.task_state - # TODO: uncomment this assertion in master #- match: { transforms.0.state: "stopped" } + +--- +"Test GET, stop, start, old continuous transforms": + - do: + cluster.health: + index: "dataframe-transform-airline-data-cont" + wait_for_status: green + timeout: 70s + + - do: + data_frame.get_data_frame_transform: + transform_id: "old-simple-continuous-transform" + - match: { count: 1 } + - match: { transforms.0.id: "old-simple-continuous-transform" } + - match: { transforms.0.sync.time.field: "time" } + - match: { transforms.0.sync.time.delay: "90m" } + - is_true: transforms.0.version + - is_true: transforms.0.create_time + + - do: + data_frame.start_data_frame_transform: + transform_id: "old-simple-continuous-transform" + - match: { acknowledged: true } + - do: + data_frame.get_data_frame_transform_stats: + transform_id: "old-simple-continuous-transform" + - match: { count: 1 } + - match: { transforms.0.id: "old-simple-continuous-transform" } + - match: { transforms.0.state: "/started|indexing/" } + + - do: + data_frame.stop_data_frame_transform: + transform_id: "old-simple-continuous-transform" + wait_for_completion: true + - match: { acknowledged: true } + + - do: + data_frame.get_data_frame_transform_stats: + transform_id: 
"old-simple-continuous-transform" + - match: { count: 1 } + - match: { transforms.0.id: "old-simple-continuous-transform" } + - match: { transforms.0.state: "stopped" } diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/80_data_frame_jobs_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/80_data_frame_jobs_crud.yml index 7d931c2c940..1a25dc1a069 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/80_data_frame_jobs_crud.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/80_data_frame_jobs_crud.yml @@ -116,3 +116,77 @@ transform_id: "old-complex-transform" - match: { count: 1 } - match: { transforms.0.id: "old-complex-transform" } + +--- +"Test put continuous data frame transform on old cluster": + - do: + indices.create: + index: dataframe-transform-airline-data-cont + body: + mappings: + properties: + time: + type: date + airline: + type: keyword + responsetime: + type: float + event_rate: + type: integer + - do: + cluster.health: + index: "dataframe-transform-airline-data-cont" + wait_for_status: green + timeout: 70s + + - do: + data_frame.put_data_frame_transform: + transform_id: "old-simple-continuous-transform" + body: > + { + "source": { "index": "dataframe-transform-airline-data-cont" }, + "dest": { "index": "old-simple-continuous-transform-idx" }, + "pivot": { + "group_by": { "airline": {"terms": {"field": "airline"}}}, + "aggs": {"avg_response": {"avg": {"field": "responsetime"}}} + }, + "sync": { + "time": { + "field": "time", + "delay": "90m" + } + } + } + - match: { acknowledged: true } + + - do: + data_frame.get_data_frame_transform: + transform_id: "old-simple-continuous-transform" + - match: { count: 1 } + - match: { transforms.0.id: "old-simple-continuous-transform" } + - match: { transforms.0.sync.time.field: "time" } + - match: { transforms.0.sync.time.delay: "90m" } + - is_true: transforms.0.version + - is_true: transforms.0.create_time + + - do: + data_frame.start_data_frame_transform: + transform_id: "old-simple-continuous-transform" + - match: { acknowledged: true } + - do: + data_frame.get_data_frame_transform_stats: + transform_id: "old-simple-continuous-transform" + - match: { count: 1 } + - match: { transforms.0.id: "old-simple-continuous-transform" } + + - do: + data_frame.stop_data_frame_transform: + transform_id: "old-simple-continuous-transform" + wait_for_completion: true + - match: { acknowledged: true } + + - do: + data_frame.get_data_frame_transform_stats: + transform_id: "old-simple-continuous-transform" + - match: { count: 1 } + - match: { transforms.0.id: "old-simple-continuous-transform" } diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml index 0c5deab1906..ce30d31a993 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml @@ -1,5 +1,8 @@ --- "Continue scroll after upgrade": + - skip: + version: "all" + reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/46529" - do: get: index: scroll_index diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/80_data_frame_jobs_crud.yml 
b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/80_data_frame_jobs_crud.yml index 36df712fc35..3c07098bc97 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/80_data_frame_jobs_crud.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/80_data_frame_jobs_crud.yml @@ -157,3 +157,117 @@ setup: data_frame.get_data_frame_transform_stats: transform_id: "old-simple-transform,mixed-simple-transform" - match: { count: 0 } + +--- +"Test GET, stop, delete, old and mixed continuous transforms": + - do: + data_frame.get_data_frame_transform: + transform_id: "old-simple-continuous-transform" + - match: { count: 1 } + - match: { transforms.0.id: "old-simple-continuous-transform" } + - match: { transforms.0.sync.time.field: "time" } + - match: { transforms.0.sync.time.delay: "90m" } + - is_true: transforms.0.version + - is_true: transforms.0.create_time + + - do: + data_frame.start_data_frame_transform: + transform_id: "old-simple-continuous-transform" + - match: { acknowledged: true } + - do: + data_frame.get_data_frame_transform_stats: + transform_id: "old-simple-continuous-transform" + - match: { count: 1 } + - match: { transforms.0.id: "old-simple-continuous-transform" } + - match: { transforms.0.state: "/started|indexing/" } + + - do: + data_frame.get_data_frame_transform_stats: + transform_id: "old-simple-continuous-transform" + - match: { count: 1 } + - match: { transforms.0.id: "old-simple-continuous-transform" } + - match: { transforms.0.state: "/started|indexing/" } + + - do: + data_frame.stop_data_frame_transform: + transform_id: "old-simple-continuous-transform" + wait_for_completion: true + - match: { acknowledged: true } + + - do: + data_frame.get_data_frame_transform_stats: + transform_id: "old-simple-continuous-transform" + - match: { count: 1 } + - match: { transforms.0.id: "old-simple-continuous-transform" } + - match: { transforms.0.state: "stopped" } + + - do: + data_frame.delete_data_frame_transform: + transform_id: "old-simple-continuous-transform" + + - do: + data_frame.get_data_frame_transform: + transform_id: "mixed-simple-continuous-transform" + - match: { count: 1 } + - match: { transforms.0.id: "mixed-simple-continuous-transform" } + - match: { transforms.0.sync.time.field: "time" } + - match: { transforms.0.sync.time.delay: "90m" } + - is_true: transforms.0.version + - is_true: transforms.0.create_time + + - do: + data_frame.start_data_frame_transform: + transform_id: "mixed-simple-continuous-transform" + - match: { acknowledged: true } + - do: + data_frame.get_data_frame_transform_stats: + transform_id: "mixed-simple-continuous-transform" + - match: { count: 1 } + - match: { transforms.0.id: "mixed-simple-continuous-transform" } + - match: { transforms.0.state: "/started|indexing/" } + + - do: + data_frame.get_data_frame_transform_stats: + transform_id: "mixed-simple-continuous-transform" + - match: { count: 1 } + - match: { transforms.0.id: "mixed-simple-continuous-transform" } + - match: { transforms.0.state: "/started|indexing/" } + + - do: + data_frame.stop_data_frame_transform: + transform_id: "mixed-simple-continuous-transform" + wait_for_completion: true + - match: { acknowledged: true } + + - do: + data_frame.get_data_frame_transform_stats: + transform_id: "mixed-simple-continuous-transform" + - match: { count: 1 } + - match: { transforms.0.id: "mixed-simple-continuous-transform" } + - match: { transforms.0.state: "stopped" } + + - do: + 
data_frame.delete_data_frame_transform: + transform_id: "mixed-simple-continuous-transform" +--- +"Test index mappings for latest internal index": + - do: + data_frame.put_data_frame_transform: + transform_id: "upgraded-simple-transform" + defer_validation: true + body: > + { + "source": { "index": "dataframe-transform-airline-data" }, + "dest": { "index": "upgraded-simple-transform-idx" }, + "pivot": { + "group_by": { "airline": {"terms": {"field": "airline"}}}, + "aggs": {"avg_response": {"avg": {"field": "responsetime"}}} + } + } + - match: { acknowledged: true } + + - do: + indices.get_mapping: + index: .data-frame-internal-2 + - match: { \.data-frame-internal-2.mappings.dynamic: "false" } + - match: { \.data-frame-internal-2.mappings.properties.id.type: "keyword" } diff --git a/x-pack/qa/src/main/java/org/elasticsearch/xpack/test/rest/XPackRestTestConstants.java b/x-pack/qa/src/main/java/org/elasticsearch/xpack/test/rest/XPackRestTestConstants.java index 96ce8fb0b41..d95a08b467a 100644 --- a/x-pack/qa/src/main/java/org/elasticsearch/xpack/test/rest/XPackRestTestConstants.java +++ b/x-pack/qa/src/main/java/org/elasticsearch/xpack/test/rest/XPackRestTestConstants.java @@ -23,7 +23,7 @@ public final class XPackRestTestConstants { // ML constants: public static final String ML_META_INDEX_NAME = ".ml-meta"; - public static final String AUDITOR_NOTIFICATIONS_INDEX = ".ml-notifications"; + public static final String AUDITOR_NOTIFICATIONS_INDEX = ".ml-notifications-000001"; public static final String CONFIG_INDEX = ".ml-config"; public static final String RESULTS_INDEX_PREFIX = ".ml-anomalies-"; public static final String STATE_INDEX_PREFIX = ".ml-state"; @@ -32,8 +32,13 @@ public final class XPackRestTestConstants { public static final List ML_PRE_V660_TEMPLATES = Collections.unmodifiableList(Arrays.asList( AUDITOR_NOTIFICATIONS_INDEX, ML_META_INDEX_NAME, STATE_INDEX_PREFIX, RESULTS_INDEX_PREFIX)); - public static final List ML_POST_V660_TEMPLATES = Collections.unmodifiableList(Arrays.asList( - AUDITOR_NOTIFICATIONS_INDEX, ML_META_INDEX_NAME, STATE_INDEX_PREFIX, RESULTS_INDEX_PREFIX, CONFIG_INDEX)); + public static final List ML_POST_V660_TEMPLATES = + Collections.unmodifiableList(Arrays.asList( + AUDITOR_NOTIFICATIONS_INDEX, + ML_META_INDEX_NAME, + STATE_INDEX_PREFIX, + RESULTS_INDEX_PREFIX, + CONFIG_INDEX)); // Data Frame constants: public static final String DATA_FRAME_INTERNAL_INDEX = ".data-frame-internal-1"; diff --git a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ADLdapUserSearchSessionFactoryTests.java b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ADLdapUserSearchSessionFactoryTests.java index 85d4955cc7f..c9306eaf847 100644 --- a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ADLdapUserSearchSessionFactoryTests.java +++ b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ADLdapUserSearchSessionFactoryTests.java @@ -43,7 +43,7 @@ public class ADLdapUserSearchSessionFactoryTests extends AbstractActiveDirectory Environment env = TestEnvironment.newEnvironment(Settings.builder().put("path.home", createTempDir()).build()); /* * Prior to each test we reinitialize the socket factory with a new SSLService so that we get a new SSLContext. 
- * If we re-use a SSLContext, previously connected sessions can get re-established which breaks hostname
+ * If we re-use an SSLContext, previously connected sessions can get re-established which breaks hostname
 * verification tests since a re-established connection does not perform hostname verification.
 */
diff --git a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/AbstractActiveDirectoryTestCase.java b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/AbstractActiveDirectoryTestCase.java
index faf225668e1..b3e470a05fc 100644
--- a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/AbstractActiveDirectoryTestCase.java
+++ b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/AbstractActiveDirectoryTestCase.java
@@ -78,7 +78,7 @@ public abstract class AbstractActiveDirectoryTestCase extends ESTestCase {
 });
 /*
 * Prior to each test we reinitialize the socket factory with a new SSLService so that we get a new SSLContext.
- * If we re-use a SSLContext, previously connected sessions can get re-established which breaks hostname
+ * If we re-use an SSLContext, previously connected sessions can get re-established which breaks hostname
 * verification tests since a re-established connection does not perform hostname verification.
 */
 Settings.Builder builder = Settings.builder().put("path.home", createTempDir());
diff --git a/x-pack/snapshot-tool/licenses/httpclient-4.5.10.jar.sha1 b/x-pack/snapshot-tool/licenses/httpclient-4.5.10.jar.sha1
new file mode 100644
index 00000000000..b708efd0dd5
--- /dev/null
+++ b/x-pack/snapshot-tool/licenses/httpclient-4.5.10.jar.sha1
@@ -0,0 +1 @@
+7ca2e4276f4ef95e4db725a8cd4a1d1e7585b9e5
\ No newline at end of file
diff --git a/x-pack/snapshot-tool/licenses/httpclient-4.5.8.jar.sha1 b/x-pack/snapshot-tool/licenses/httpclient-4.5.8.jar.sha1
deleted file mode 100644
index 73f0d30c709..00000000000
--- a/x-pack/snapshot-tool/licenses/httpclient-4.5.8.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-c27c9d6f15435dc2b6947112027b418b0eef32b9
\ No newline at end of file
diff --git a/x-pack/snapshot-tool/licenses/httpcore-4.4.11.jar.sha1 b/x-pack/snapshot-tool/licenses/httpcore-4.4.11.jar.sha1
deleted file mode 100644
index 6d64372bfcc..00000000000
--- a/x-pack/snapshot-tool/licenses/httpcore-4.4.11.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-de748cf874e4e193b42eceea9fe5574fabb9d4df
\ No newline at end of file
diff --git a/x-pack/snapshot-tool/licenses/httpcore-4.4.12.jar.sha1 b/x-pack/snapshot-tool/licenses/httpcore-4.4.12.jar.sha1
new file mode 100644
index 00000000000..3c046171b30
--- /dev/null
+++ b/x-pack/snapshot-tool/licenses/httpcore-4.4.12.jar.sha1
@@ -0,0 +1 @@
+21ebaf6d532bc350ba95bd81938fa5f0e511c132
\ No newline at end of file
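
The EmailSslTests change above stops shipping a binary PKCS#12 test keystore and instead copies a PEM certificate and an encrypted PEM private key into a temp directory, then assembles the KeyStore in memory. A minimal sketch of that pattern, reusing the same x-pack helpers the test imports; the alias, paths and password below are illustrative, and the key is assumed to share the keystore password, as in the test:

    import java.io.IOException;
    import java.nio.file.Path;
    import java.security.GeneralSecurityException;
    import java.security.KeyStore;
    import java.util.Collections;

    import org.elasticsearch.xpack.core.ssl.CertParsingUtils;
    import org.elasticsearch.xpack.core.ssl.PemUtils;

    final class PemKeyStoreSketch {
        // Assemble an in-memory KeyStore from a PEM certificate and an encrypted PEM private key,
        // mirroring the startSmtpServer() setup in EmailSslTests. Alias, paths and password are illustrative.
        static KeyStore buildKeyStore(Path certPath, Path keyPath, char[] password)
                throws GeneralSecurityException, IOException {
            KeyStore keyStore = KeyStore.getInstance(KeyStore.getDefaultType());
            keyStore.load(null, password); // initialise an empty keystore rather than reading a .p12 file
            keyStore.setKeyEntry("test-smtp",
                PemUtils.readPrivateKey(keyPath, password::clone),   // the key is decrypted with the keystore password
                password,
                CertParsingUtils.readCertificates(Collections.singletonList(certPath)));
            return keyStore;
        }
    }

Building the keystore at runtime keeps only text PEM fixtures in the repository; the same change also guards the disabled-verification test with assumeFalse(... inFipsJvm()), since verification mode "none" cannot be exercised in a FIPS JVM.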
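
The new rolling-upgrade YAML above provisions a continuous transform by adding a sync.time block to the PUT body. Issued from Java, the equivalent request can go through the low-level REST client; this is a sketch, not part of the patch: the JSON body is the one the tests use, the /_data_frame/transforms path is the 7.x endpoint that the data_frame.put_data_frame_transform action resolves to, and the transform id and destination index are illustrative:

    import java.io.IOException;

    import org.elasticsearch.client.Request;
    import org.elasticsearch.client.Response;
    import org.elasticsearch.client.RestClient;

    final class ContinuousTransformSketch {
        // PUT a continuous (sync-configured) transform equivalent to the one the upgrade tests create.
        static Response putContinuousTransform(RestClient client) throws IOException {
            Request request = new Request("PUT", "/_data_frame/transforms/simple-continuous-transform");
            request.setJsonEntity(
                "{"
                + "  \"source\": { \"index\": \"dataframe-transform-airline-data-cont\" },"
                + "  \"dest\": { \"index\": \"simple-continuous-transform-idx\" },"
                + "  \"pivot\": {"
                + "    \"group_by\": { \"airline\": { \"terms\": { \"field\": \"airline\" } } },"
                + "    \"aggs\": { \"avg_response\": { \"avg\": { \"field\": \"responsetime\" } } }"
                + "  },"
                + "  \"sync\": { \"time\": { \"field\": \"time\", \"delay\": \"90m\" } }"
                + "}");
            return client.performRequest(request); // responds with {"acknowledged": true} on success
        }
    }

Starting and stopping the transform then map to POST /_data_frame/transforms/{id}/_start and /_stop, which is what the data_frame.start_data_frame_transform and data_frame.stop_data_frame_transform actions in the YAML tests call.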